Lines matching full:d in the AoE driver's aoedev.c
18 static void freetgt(struct aoedev *d, struct aoetgt *t);
19 static void skbpoolfree(struct aoedev *d);
72 pr_err("aoe: %s %d slots per shelf\n", in minor_get_static()
81 pr_err("aoe: %s with e%ld.%d\n", in minor_get_static()
138 aoedev_put(struct aoedev *d) in aoedev_put() argument
143 d->ref--; in aoedev_put()
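The aoedev_put() matches above show the reference count being dropped while the device lock is held. A minimal userspace sketch of that idiom follows; the names (struct dev, dev_put) are hypothetical and only mirror the shape of the driver code, where the matching d->ref++ also happens under the same lock in aoedev_by_aoeaddr().

#include <pthread.h>

struct dev {
	pthread_mutex_t lock;
	int ref;
};

static void dev_put(struct dev *d)
{
	pthread_mutex_lock(&d->lock);
	d->ref--;		/* flush() skips any device whose ref is still nonzero */
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	struct dev d = { PTHREAD_MUTEX_INITIALIZER, 1 };

	dev_put(&d);
	return d.ref;		/* 0 once the last reference is gone */
}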
150 struct aoedev *d; in dummy_timer() local
152 d = from_timer(d, t, timer); in dummy_timer()
153 if (d->flags & DEVFL_TKILL) in dummy_timer()
155 d->timer.expires = jiffies + HZ; in dummy_timer()
156 add_timer(&d->timer); in dummy_timer()
160 aoe_failip(struct aoedev *d) in aoe_failip() argument
166 aoe_failbuf(d, d->ip.buf); in aoe_failip()
167 rq = d->ip.rq; in aoe_failip()
172 while ((bio = d->ip.nxbio)) { in aoe_failip()
174 d->ip.nxbio = bio->bi_next; in aoe_failip()
179 aoe_end_request(d, rq, 0); in aoe_failip()
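The aoe_failip() matches show the in-flight buffer being failed, the not-yet-issued bio chain being walked via bi_next, and the request then being ended with an error. Below is a small userspace C analogue of that walk-and-fail idiom; every name in it (io_request, pending_seg, fail_seg) is hypothetical and stands in for the driver's structures.

#include <stdio.h>
#include <stdlib.h>

struct pending_seg {
	int id;
	struct pending_seg *next;	/* plays the role of bio->bi_next */
};

struct io_request {
	struct pending_seg *in_flight;	/* like d->ip.buf */
	struct pending_seg *not_issued;	/* like d->ip.nxbio */
	int error;
};

static void fail_seg(struct pending_seg *s)
{
	printf("failing segment %d\n", s->id);
	free(s);
}

static void fail_request(struct io_request *rq)
{
	struct pending_seg *s;

	if (rq->in_flight)
		fail_seg(rq->in_flight);
	/* walk and fail everything that was queued but never sent */
	while ((s = rq->not_issued)) {
		rq->not_issued = s->next;
		fail_seg(s);
	}
	rq->error = -1;			/* the driver ends the request with an error here */
}

int main(void)
{
	struct pending_seg *a = malloc(sizeof(*a));
	struct pending_seg *b = malloc(sizeof(*b));
	struct io_request rq = { 0 };

	a->id = 1; a->next = b;
	b->id = 2; b->next = NULL;
	rq.not_issued = a;
	fail_request(&rq);
	return rq.error == -1 ? 0 : 1;	/* 0 when the failure path ran */
}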
191 aoe_failbuf(f->t->d, f->buf); in downdev_frame()
197 aoedev_downdev(struct aoedev *d) in aoedev_downdev() argument
203 d->flags &= ~DEVFL_UP; in aoedev_downdev()
207 head = &d->factive[i]; in aoedev_downdev()
211 head = &d->rexmitq; in aoedev_downdev()
216 tt = d->targets; in aoedev_downdev()
217 te = tt + d->ntargets; in aoedev_downdev()
224 aoe_failip(d); in aoedev_downdev()
227 if (d->blkq) { in aoedev_downdev()
229 unsigned int memflags = blk_mq_freeze_queue(d->blkq); in aoedev_downdev()
231 blk_mq_quiesce_queue(d->blkq); in aoedev_downdev()
232 blk_mq_unquiesce_queue(d->blkq); in aoedev_downdev()
233 blk_mq_unfreeze_queue(d->blkq, memflags); in aoedev_downdev()
236 if (d->gd) in aoedev_downdev()
237 set_capacity(d->gd, 0); in aoedev_downdev()
244 user_req(char *s, size_t slen, struct aoedev *d) in user_req() argument
249 if (!d->gd) in user_req()
251 p = kbasename(d->gd->disk_name); in user_req()
252 lim = sizeof(d->gd->disk_name); in user_req()
253 lim -= p - d->gd->disk_name; in user_req()
261 freedev(struct aoedev *d) in freedev() argument
267 spin_lock_irqsave(&d->lock, flags); in freedev()
268 if (d->flags & DEVFL_TKILL in freedev()
269 && !(d->flags & DEVFL_FREEING)) { in freedev()
270 d->flags |= DEVFL_FREEING; in freedev()
273 spin_unlock_irqrestore(&d->lock, flags); in freedev()
277 del_timer_sync(&d->timer); in freedev()
278 if (d->gd) { in freedev()
279 aoedisk_rm_debugfs(d); in freedev()
280 del_gendisk(d->gd); in freedev()
281 put_disk(d->gd); in freedev()
282 blk_mq_free_tag_set(&d->tag_set); in freedev()
284 t = d->targets; in freedev()
285 e = t + d->ntargets; in freedev()
287 freetgt(d, *t); in freedev()
289 mempool_destroy(d->bufpool); in freedev()
290 skbpoolfree(d); in freedev()
291 minor_free(d->sysminor); in freedev()
293 spin_lock_irqsave(&d->lock, flags); in freedev()
294 d->flags |= DEVFL_FREED; in freedev()
295 spin_unlock_irqrestore(&d->lock, flags); in freedev()
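The freedev() matches show teardown being gated on flags flipped under the device lock: proceed only if DEVFL_TKILL is set and DEVFL_FREEING is not, mark FREEING, release resources outside the lock, then mark FREED so flush() can unlink the device later. A minimal pthread sketch of that gating follows; the flag and function names here are hypothetical.

#include <pthread.h>
#include <stdio.h>

#define FL_TKILL   0x1
#define FL_FREEING 0x2
#define FL_FREED   0x4

struct dev {
	pthread_mutex_t lock;
	unsigned int flags;
};

static void freedev_sketch(struct dev *d)
{
	int ok = 0;

	pthread_mutex_lock(&d->lock);
	if ((d->flags & FL_TKILL) && !(d->flags & FL_FREEING)) {
		d->flags |= FL_FREEING;	/* claim the teardown */
		ok = 1;
	}
	pthread_mutex_unlock(&d->lock);
	if (!ok)
		return;			/* someone else is (or already was) freeing */

	/* ... timers, disk, pools would be released here, outside the lock ... */
	puts("resources released");

	pthread_mutex_lock(&d->lock);
	d->flags |= FL_FREED;		/* the reaper unlinks FREED devices later */
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	struct dev d = { PTHREAD_MUTEX_INITIALIZER, FL_TKILL };

	freedev_sketch(&d);
	freedev_sketch(&d);		/* second call is a no-op */
	return 0;
}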
307 struct aoedev *d, **dd; in flush() local
329 for (d = devlist; d; d = d->next) { in flush()
330 spin_lock(&d->lock); in flush()
331 if (d->flags & DEVFL_TKILL) in flush()
337 if (!user_req(buf, cnt, d)) in flush()
339 } else if ((!all && (d->flags & DEVFL_UP)) in flush()
340 || d->flags & skipflags in flush()
341 || d->nopen in flush()
342 || d->ref) in flush()
345 spin_unlock(&d->lock); in flush()
347 aoedev_downdev(d); in flush()
348 d->flags |= DEVFL_TKILL; in flush()
351 spin_unlock(&d->lock); in flush()
360 for (d = devlist; d; d = d->next) { in flush()
361 spin_lock(&d->lock); in flush()
362 if (d->flags & DEVFL_TKILL in flush()
363 && !(d->flags & DEVFL_FREEING)) { in flush()
364 spin_unlock(&d->lock); in flush()
366 freedev(d); in flush()
369 spin_unlock(&d->lock); in flush()
373 for (dd = &devlist, d = *dd; d; d = *dd) { in flush()
376 spin_lock(&d->lock); in flush()
377 if (d->flags & DEVFL_FREED) { in flush()
378 *dd = d->next; in flush()
379 doomed = d; in flush()
381 dd = &d->next; in flush()
383 spin_unlock(&d->lock); in flush()
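The last flush() matches use the pointer-to-pointer walk "for (dd = &devlist, d = *dd; d; d = *dd)" to unlink FREED devices without tracking a separate predecessor pointer. The standalone sketch below shows only that unlink idiom; the node layout and the reap() name are hypothetical.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int freed;
	struct node *next;
};

static void reap(struct node **head)
{
	struct node **dd, *d;

	for (dd = head, d = *dd; d; d = *dd) {
		if (d->freed) {
			*dd = d->next;	/* unlink: the predecessor now skips d */
			free(d);
		} else {
			dd = &d->next;	/* advance only when nothing was removed */
		}
	}
}

int main(void)
{
	struct node *head = NULL, *n;
	int i;

	for (i = 0; i < 5; i++) {
		n = malloc(sizeof(*n));
		n->freed = (i % 2);	/* mark odd nodes for removal */
		n->next = head;
		head = n;
	}
	reap(&head);
	for (n = head; n; n = n->next)
		printf("kept node, freed=%d\n", n->freed);
	return 0;
}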
428 skbpoolfree(struct aoedev *d) in skbpoolfree() argument
432 skb_queue_walk_safe(&d->skbpool, skb, tmp) in skbpoolfree()
435 __skb_queue_head_init(&d->skbpool); in skbpoolfree()
442 struct aoedev *d; in aoedev_by_aoeaddr() local
449 for (d=devlist; d; d=d->next) in aoedev_by_aoeaddr()
450 if (d->aoemajor == maj && d->aoeminor == min) { in aoedev_by_aoeaddr()
451 spin_lock(&d->lock); in aoedev_by_aoeaddr()
452 if (d->flags & DEVFL_TKILL) { in aoedev_by_aoeaddr()
453 spin_unlock(&d->lock); in aoedev_by_aoeaddr()
454 d = NULL; in aoedev_by_aoeaddr()
457 d->ref++; in aoedev_by_aoeaddr()
458 spin_unlock(&d->lock); in aoedev_by_aoeaddr()
461 if (d || !do_alloc || minor_get(&sysminor, maj, min) < 0) in aoedev_by_aoeaddr()
463 d = kcalloc(1, sizeof *d, GFP_ATOMIC); in aoedev_by_aoeaddr()
464 if (!d) in aoedev_by_aoeaddr()
466 d->targets = kcalloc(NTARGETS, sizeof(*d->targets), GFP_ATOMIC); in aoedev_by_aoeaddr()
467 if (!d->targets) { in aoedev_by_aoeaddr()
468 kfree(d); in aoedev_by_aoeaddr()
469 d = NULL; in aoedev_by_aoeaddr()
472 d->ntargets = NTARGETS; in aoedev_by_aoeaddr()
473 INIT_WORK(&d->work, aoecmd_sleepwork); in aoedev_by_aoeaddr()
474 spin_lock_init(&d->lock); in aoedev_by_aoeaddr()
475 INIT_LIST_HEAD(&d->rq_list); in aoedev_by_aoeaddr()
476 skb_queue_head_init(&d->skbpool); in aoedev_by_aoeaddr()
477 timer_setup(&d->timer, dummy_timer, 0); in aoedev_by_aoeaddr()
478 d->timer.expires = jiffies + HZ; in aoedev_by_aoeaddr()
479 add_timer(&d->timer); in aoedev_by_aoeaddr()
480 d->bufpool = NULL; /* defer to aoeblk_gdalloc */ in aoedev_by_aoeaddr()
481 d->tgt = d->targets; in aoedev_by_aoeaddr()
482 d->ref = 1; in aoedev_by_aoeaddr()
484 INIT_LIST_HEAD(&d->factive[i]); in aoedev_by_aoeaddr()
485 INIT_LIST_HEAD(&d->rexmitq); in aoedev_by_aoeaddr()
486 d->sysminor = sysminor; in aoedev_by_aoeaddr()
487 d->aoemajor = maj; in aoedev_by_aoeaddr()
488 d->aoeminor = min; in aoedev_by_aoeaddr()
489 d->rttavg = RTTAVG_INIT; in aoedev_by_aoeaddr()
490 d->rttdev = RTTDEV_INIT; in aoedev_by_aoeaddr()
491 d->next = devlist; in aoedev_by_aoeaddr()
492 devlist = d; in aoedev_by_aoeaddr()
495 return d; in aoedev_by_aoeaddr()
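The aoedev_by_aoeaddr() matches follow a lookup-or-allocate pattern: search the list for a matching shelf/slot address and take a reference, or allocate a fresh device (with GFP_ATOMIC, since the code cannot sleep there), initialize it, and push it onto the head of the list with ref = 1. The userspace analogue below uses a mutex and calloc in place of the kernel primitives; all names in it are hypothetical.

#include <pthread.h>
#include <stdlib.h>

struct dev {
	int major, minor;
	int ref;
	struct dev *next;
};

static pthread_mutex_t devlock = PTHREAD_MUTEX_INITIALIZER;
static struct dev *devlist;

static struct dev *dev_by_addr(int maj, int min, int do_alloc)
{
	struct dev *d;

	pthread_mutex_lock(&devlock);
	for (d = devlist; d; d = d->next)
		if (d->major == maj && d->minor == min) {
			d->ref++;		/* caller drops this reference later */
			goto out;
		}
	if (!do_alloc)
		goto out;			/* d is NULL here: not found, no allocation */
	d = calloc(1, sizeof(*d));
	if (!d)
		goto out;
	d->major = maj;
	d->minor = min;
	d->ref = 1;
	d->next = devlist;			/* push onto the list head */
	devlist = d;
out:
	pthread_mutex_unlock(&devlock);
	return d;
}

int main(void)
{
	struct dev *a = dev_by_addr(1, 2, 1);	/* allocates */
	struct dev *b = dev_by_addr(1, 2, 0);	/* finds the same entry, ref becomes 2 */

	return (a && a == b && a->ref == 2) ? 0 : 1;
}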
499 freetgt(struct aoedev *d, struct aoetgt *t) in freetgt() argument