Lines matching references to "d" (the struct aoedev pointer) in the ATA over Ethernet (aoe) block driver's device-management code, aoedev.c. Each entry gives the source line number, the matching line, and the enclosing function; "argument" and "local" indicate how d is bound at that reference.
18 static void freetgt(struct aoedev *d, struct aoetgt *t);
19 static void skbpoolfree(struct aoedev *d);
138 aoedev_put(struct aoedev *d) in aoedev_put() argument
143 d->ref--; in aoedev_put()
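
Lines 138-143 above are the put side of the aoedev reference count; the matching get (d->ref++) appears in aoedev_by_aoeaddr() further down. A minimal sketch of the put routine, assuming the counter is protected by a driver-wide lock (the name devlist_lock is an assumption, not shown in this listing):

/* Sketch: drop one reference to an aoedev.  Only d->ref-- is taken from
 * the listing; the surrounding locking is assumed. */
static void
example_aoedev_put(struct aoedev *d)
{
	unsigned long flags;

	spin_lock_irqsave(&devlist_lock, flags);	/* assumed lock */
	d->ref--;			/* pairs with d->ref++ in aoedev_by_aoeaddr() */
	spin_unlock_irqrestore(&devlist_lock, flags);
}
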
150 struct aoedev *d; in dummy_timer() local
152 d = from_timer(d, t, timer); in dummy_timer()
153 if (d->flags & DEVFL_TKILL) in dummy_timer()
155 d->timer.expires = jiffies + HZ; in dummy_timer()
156 add_timer(&d->timer); in dummy_timer()
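
Lines 150-156 show the driver's self-rearming "dummy" timer: from_timer() recovers the aoedev that embeds the timer_list, the DEVFL_TKILL check stops the timer once teardown begins, and otherwise the timer reschedules itself one second out. A sketch of that shape; the early return on DEVFL_TKILL fills in the if-body the listing omits:

static void
example_dummy_timer(struct timer_list *t)
{
	struct aoedev *d;

	d = from_timer(d, t, timer);	/* container_of the embedded timer_list */
	if (d->flags & DEVFL_TKILL)
		return;			/* device is being torn down; stop re-arming */
	d->timer.expires = jiffies + HZ;
	add_timer(&d->timer);		/* fire again in one second */
}
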
160 aoe_failip(struct aoedev *d) in aoe_failip() argument
166 aoe_failbuf(d, d->ip.buf); in aoe_failip()
167 rq = d->ip.rq; in aoe_failip()
172 while ((bio = d->ip.nxbio)) { in aoe_failip()
174 d->ip.nxbio = bio->bi_next; in aoe_failip()
179 aoe_end_request(d, rq, 0); in aoe_failip()
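
Lines 160-179 are aoe_failip(), which fails the request currently being split into AoE frames: the in-flight buffer is failed, the not-yet-issued bios chained through bi_next are walked and marked failed, and the request is completed with zero bytes done. A condensed sketch; the NULL-request guard and the bi_status assignment are assumptions, and the driver's per-request bookkeeping is omitted:

static void
example_aoe_failip(struct aoedev *d)
{
	struct request *rq;
	struct bio *bio;

	aoe_failbuf(d, d->ip.buf);		/* fail the buffer in flight */
	rq = d->ip.rq;
	if (rq == NULL)
		return;				/* assumed guard: no request queued */

	while ((bio = d->ip.nxbio)) {		/* unissued bios, linked via bi_next */
		bio->bi_status = BLK_STS_IOERR;	/* assumption: mark each as failed */
		d->ip.nxbio = bio->bi_next;
	}
	aoe_end_request(d, rq, 0);		/* complete with 0 bytes transferred */
}
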
191 aoe_failbuf(f->t->d, f->buf); in downdev_frame()
197 aoedev_downdev(struct aoedev *d) in aoedev_downdev() argument
203 d->flags &= ~DEVFL_UP; in aoedev_downdev()
207 head = &d->factive[i]; in aoedev_downdev()
211 head = &d->rexmitq; in aoedev_downdev()
216 tt = d->targets; in aoedev_downdev()
217 te = tt + d->ntargets; in aoedev_downdev()
224 aoe_failip(d); in aoedev_downdev()
227 if (d->blkq) { in aoedev_downdev()
229 blk_mq_freeze_queue(d->blkq); in aoedev_downdev()
230 blk_mq_quiesce_queue(d->blkq); in aoedev_downdev()
231 blk_mq_unquiesce_queue(d->blkq); in aoedev_downdev()
232 blk_mq_unfreeze_queue(d->blkq); in aoedev_downdev()
235 if (d->gd) in aoedev_downdev()
236 set_capacity(d->gd, 0); in aoedev_downdev()
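
Lines 197-236 outline aoedev_downdev(): clear DEVFL_UP, pull every frame off the active lists and the retransmit queue and fail it, reset per-target state, fail the in-progress request, cycle the blk-mq queue through freeze/quiesce/unquiesce/unfreeze so nothing stays stuck, and zero the gendisk capacity. A compressed sketch of that ordering; NFACTIVE, the list splicing, the downdev_frame() signature, and the nout reset are assumptions beyond what the listing shows:

static void
example_aoedev_downdev(struct aoedev *d)
{
	struct aoetgt **tt, **te;
	struct list_head *pos;
	LIST_HEAD(flist);
	int i;

	d->flags &= ~DEVFL_UP;			/* mark the device down */

	/* collect all outstanding frames and fail them */
	for (i = 0; i < NFACTIVE; i++)		/* NFACTIVE: assumed size of factive[] */
		list_splice_init(&d->factive[i], &flist);
	list_splice_init(&d->rexmitq, &flist);
	while (!list_empty(&flist)) {
		pos = flist.next;
		list_del(pos);
		downdev_frame(pos);		/* see line 191: fails the frame's buffer */
	}

	tt = d->targets;			/* reset per-target bookkeeping */
	te = tt + d->ntargets;
	for (; tt < te && *tt; tt++)
		(*tt)->nout = 0;		/* assumption: clear outstanding-frame count */

	aoe_failip(d);				/* fail the partially issued request */

	if (d->blkq) {				/* flush anything stalled in blk-mq */
		blk_mq_freeze_queue(d->blkq);
		blk_mq_quiesce_queue(d->blkq);
		blk_mq_unquiesce_queue(d->blkq);
		blk_mq_unfreeze_queue(d->blkq);
	}

	if (d->gd)
		set_capacity(d->gd, 0);		/* device is no longer usable */
}
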
243 user_req(char *s, size_t slen, struct aoedev *d) in user_req() argument
248 if (!d->gd) in user_req()
250 p = kbasename(d->gd->disk_name); in user_req()
251 lim = sizeof(d->gd->disk_name); in user_req()
252 lim -= p - d->gd->disk_name; in user_req()
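
Lines 243-252 are user_req(), which decides whether a user-supplied name from the "flush" control message refers to this device: it takes the basename of the gendisk name and compares the two, bounded by the space remaining in disk_name. A sketch with the final comparison filled in as an assumption:

static int
example_user_req(char *s, size_t slen, struct aoedev *d)
{
	const char *p;
	size_t lim;

	if (!d->gd)
		return 0;			/* no gendisk yet: cannot match by name */
	p = kbasename(d->gd->disk_name);	/* e.g. "etherd/e1.0" -> "e1.0" */
	lim = sizeof(d->gd->disk_name);
	lim -= p - d->gd->disk_name;		/* bytes left starting at the basename */
	if (slen < lim)
		lim = slen;			/* assumption: clamp to the user string */

	return !strncmp(s, p, lim);		/* assumption: match over at most lim bytes */
}
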
260 freedev(struct aoedev *d) in freedev() argument
266 spin_lock_irqsave(&d->lock, flags); in freedev()
267 if (d->flags & DEVFL_TKILL in freedev()
268 && !(d->flags & DEVFL_FREEING)) { in freedev()
269 d->flags |= DEVFL_FREEING; in freedev()
272 spin_unlock_irqrestore(&d->lock, flags); in freedev()
276 del_timer_sync(&d->timer); in freedev()
277 if (d->gd) { in freedev()
278 aoedisk_rm_debugfs(d); in freedev()
279 del_gendisk(d->gd); in freedev()
280 put_disk(d->gd); in freedev()
281 blk_mq_free_tag_set(&d->tag_set); in freedev()
283 t = d->targets; in freedev()
284 e = t + d->ntargets; in freedev()
286 freetgt(d, *t); in freedev()
288 mempool_destroy(d->bufpool); in freedev()
289 skbpoolfree(d); in freedev()
290 minor_free(d->sysminor); in freedev()
292 spin_lock_irqsave(&d->lock, flags); in freedev()
293 d->flags |= DEVFL_FREED; in freedev()
294 spin_unlock_irqrestore(&d->lock, flags); in freedev()
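
Lines 260-294 are freedev(), and they fix the teardown ordering: claim the device by setting DEVFL_FREEING under d->lock (only when DEVFL_TKILL is set and nobody else is already freeing it), then, outside the lock, stop the timer, tear down the gendisk and its blk-mq tag set, free every target, release the buffer mempool and the skb pool, return the system minor, and finally mark the device DEVFL_FREED so flush() can unlink it. A sketch of that sequence; only the calls visible above come from the listing, and the bail-out path for a device someone else is freeing is an assumption:

static void
example_freedev(struct aoedev *d)
{
	struct aoetgt **t, **e;
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	if ((d->flags & DEVFL_TKILL) && !(d->flags & DEVFL_FREEING)) {
		d->flags |= DEVFL_FREEING;	/* we own the teardown from here on */
	} else {
		spin_unlock_irqrestore(&d->lock, flags);
		return;				/* not marked for kill, or already being freed */
	}
	spin_unlock_irqrestore(&d->lock, flags);

	del_timer_sync(&d->timer);		/* no further dummy_timer re-arms */
	if (d->gd) {
		aoedisk_rm_debugfs(d);
		del_gendisk(d->gd);
		put_disk(d->gd);
		blk_mq_free_tag_set(&d->tag_set);
	}
	t = d->targets;
	e = t + d->ntargets;
	for (; t < e && *t; t++)
		freetgt(d, *t);

	mempool_destroy(d->bufpool);
	skbpoolfree(d);
	minor_free(d->sysminor);

	spin_lock_irqsave(&d->lock, flags);
	d->flags |= DEVFL_FREED;		/* flush() may now unlink and free it */
	spin_unlock_irqrestore(&d->lock, flags);
}
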
306 struct aoedev *d, **dd; in flush() local
328 for (d = devlist; d; d = d->next) { in flush()
329 spin_lock(&d->lock); in flush()
330 if (d->flags & DEVFL_TKILL) in flush()
336 if (!user_req(buf, cnt, d)) in flush()
338 } else if ((!all && (d->flags & DEVFL_UP)) in flush()
339 || d->flags & skipflags in flush()
340 || d->nopen in flush()
341 || d->ref) in flush()
344 spin_unlock(&d->lock); in flush()
346 aoedev_downdev(d); in flush()
347 d->flags |= DEVFL_TKILL; in flush()
350 spin_unlock(&d->lock); in flush()
359 for (d = devlist; d; d = d->next) { in flush()
360 spin_lock(&d->lock); in flush()
361 if (d->flags & DEVFL_TKILL in flush()
362 && !(d->flags & DEVFL_FREEING)) { in flush()
363 spin_unlock(&d->lock); in flush()
365 freedev(d); in flush()
368 spin_unlock(&d->lock); in flush()
372 for (dd = &devlist, d = *dd; d; d = *dd) { in flush()
375 spin_lock(&d->lock); in flush()
376 if (d->flags & DEVFL_FREED) { in flush()
377 *dd = d->next; in flush()
378 doomed = d; in flush()
380 dd = &d->next; in flush()
382 spin_unlock(&d->lock); in flush()
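
Lines 306-382 come from flush(), which runs three passes over devlist: the first marks matching devices DEVFL_TKILL and downs them (skipping devices that are still up, open, or referenced unless "all" was requested), the second calls freedev() on devices that are TKILL but not yet FREEING, and the third unlinks devices that have reached DEVFL_FREED. A sketch of that last, least obvious pass; devlist_lock and the final kfree() calls are assumptions not shown above:

/* Sketch of flush()'s final pass: unlink devices marked DEVFL_FREED by
 * freedev() and release their remaining memory. */
static void
example_flush_reap_freed(void)
{
	struct aoedev *d, **dd, *doomed;
	unsigned long flags;

	spin_lock_irqsave(&devlist_lock, flags);	/* assumed global list lock */
	for (dd = &devlist, d = *dd; d; d = *dd) {
		doomed = NULL;
		spin_lock(&d->lock);
		if (d->flags & DEVFL_FREED) {
			*dd = d->next;			/* unlink from devlist */
			doomed = d;
		} else
			dd = &d->next;			/* keep it; advance the link pointer */
		spin_unlock(&d->lock);
		if (doomed) {
			kfree(doomed->targets);		/* assumption: free what freedev() left */
			kfree(doomed);
		}
	}
	spin_unlock_irqrestore(&devlist_lock, flags);
}
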
427 skbpoolfree(struct aoedev *d) in skbpoolfree() argument
431 skb_queue_walk_safe(&d->skbpool, skb, tmp) in skbpoolfree()
434 __skb_queue_head_init(&d->skbpool); in skbpoolfree()
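
Lines 427-434 are skbpoolfree(), which drains the per-device pool of preallocated socket buffers: walk the queue with the _safe iterator so entries can be released during the walk, free each skb, then reinitialize the queue head. A sketch; the unlink and the dev_kfree_skb() call are assumptions about how each buffer is released:

static void
example_skbpoolfree(struct aoedev *d)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&d->skbpool, skb, tmp) {	/* safe: skb is released below */
		__skb_unlink(skb, &d->skbpool);		/* assumption: detach before freeing */
		dev_kfree_skb(skb);			/* assumption: plain free */
	}

	__skb_queue_head_init(&d->skbpool);		/* leave the pool empty but valid */
}
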
441 struct aoedev *d; in aoedev_by_aoeaddr() local
448 for (d=devlist; d; d=d->next) in aoedev_by_aoeaddr()
449 if (d->aoemajor == maj && d->aoeminor == min) { in aoedev_by_aoeaddr()
450 spin_lock(&d->lock); in aoedev_by_aoeaddr()
451 if (d->flags & DEVFL_TKILL) { in aoedev_by_aoeaddr()
452 spin_unlock(&d->lock); in aoedev_by_aoeaddr()
453 d = NULL; in aoedev_by_aoeaddr()
456 d->ref++; in aoedev_by_aoeaddr()
457 spin_unlock(&d->lock); in aoedev_by_aoeaddr()
460 if (d || !do_alloc || minor_get(&sysminor, maj, min) < 0) in aoedev_by_aoeaddr()
462 d = kcalloc(1, sizeof *d, GFP_ATOMIC); in aoedev_by_aoeaddr()
463 if (!d) in aoedev_by_aoeaddr()
465 d->targets = kcalloc(NTARGETS, sizeof(*d->targets), GFP_ATOMIC); in aoedev_by_aoeaddr()
466 if (!d->targets) { in aoedev_by_aoeaddr()
467 kfree(d); in aoedev_by_aoeaddr()
468 d = NULL; in aoedev_by_aoeaddr()
471 d->ntargets = NTARGETS; in aoedev_by_aoeaddr()
472 INIT_WORK(&d->work, aoecmd_sleepwork); in aoedev_by_aoeaddr()
473 spin_lock_init(&d->lock); in aoedev_by_aoeaddr()
474 INIT_LIST_HEAD(&d->rq_list); in aoedev_by_aoeaddr()
475 skb_queue_head_init(&d->skbpool); in aoedev_by_aoeaddr()
476 timer_setup(&d->timer, dummy_timer, 0); in aoedev_by_aoeaddr()
477 d->timer.expires = jiffies + HZ; in aoedev_by_aoeaddr()
478 add_timer(&d->timer); in aoedev_by_aoeaddr()
479 d->bufpool = NULL; /* defer to aoeblk_gdalloc */ in aoedev_by_aoeaddr()
480 d->tgt = d->targets; in aoedev_by_aoeaddr()
481 d->ref = 1; in aoedev_by_aoeaddr()
483 INIT_LIST_HEAD(&d->factive[i]); in aoedev_by_aoeaddr()
484 INIT_LIST_HEAD(&d->rexmitq); in aoedev_by_aoeaddr()
485 d->sysminor = sysminor; in aoedev_by_aoeaddr()
486 d->aoemajor = maj; in aoedev_by_aoeaddr()
487 d->aoeminor = min; in aoedev_by_aoeaddr()
488 d->rttavg = RTTAVG_INIT; in aoedev_by_aoeaddr()
489 d->rttdev = RTTDEV_INIT; in aoedev_by_aoeaddr()
490 d->next = devlist; in aoedev_by_aoeaddr()
491 devlist = d; in aoedev_by_aoeaddr()
494 return d; in aoedev_by_aoeaddr()
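
Lines 441-494 are aoedev_by_aoeaddr(), a lookup-or-allocate routine: scan devlist for a device with the requested AoE major and minor, take a reference if it exists and is not being killed, and otherwise (when do_alloc is set and a system minor can be reserved) allocate a fresh aoedev, initialize its lists, timer, and reference count, and link it at the head of devlist. The allocation tail is nearly complete in the listing; a sketch of the lookup half, whose locking (devlist_lock) is an assumption:

static struct aoedev *
example_aoedev_lookup(ulong maj, int min)
{
	struct aoedev *d;
	unsigned long flags;

	spin_lock_irqsave(&devlist_lock, flags);	/* assumed global list lock */
	for (d = devlist; d; d = d->next)
		if (d->aoemajor == maj && d->aoeminor == min) {
			spin_lock(&d->lock);
			if (d->flags & DEVFL_TKILL) {	/* being torn down: treat as absent */
				spin_unlock(&d->lock);
				d = NULL;
				break;
			}
			d->ref++;			/* paired with aoedev_put() */
			spin_unlock(&d->lock);
			break;
		}
	spin_unlock_irqrestore(&devlist_lock, flags);
	return d;
}
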
498 freetgt(struct aoedev *d, struct aoetgt *t) in freetgt() argument