Lines matching full:work (fs/btrfs/async-thread.c)
/* List head pointing to ordered work list */
struct list_head ordered_list;
struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
{
	return work->wq->fs_info;
}
/*
 * Hook for threshold which will be called before executing the work.
 * This hook is called in kthread context, so workqueue_set_max_active()
 * may be called here.
 */
static inline void thresh_exec_hook(struct btrfs_workqueue *wq)
static void run_ordered_work(struct btrfs_workqueue *wq,
			     struct btrfs_work *self)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	bool free_self = false;

	while (1) {
		/* ... runs with wq->list_lock held; stops when the list is empty ... */
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;
		/*
		 * Paired with the smp_mb__before_atomic() in
		 * btrfs_work_helper(), this guarantees that the ordered
		 * function sees all updates from the ordinary work function.
		 */
		smp_rmb();
		/*
		 * We are going to call the ordered-done function, but we
		 * leave the work item on the list as a barrier so that later
		 * work items that are done don't have their functions called
		 * before this one returns.
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		work->ordered_func(work, false);
		/* ... retake the lock, then drop our item from the list ... */
		list_del(&work->ordered_list);
		if (work == self) {
			/*
			 * This is the work item that the worker is currently
			 * executing. The kernel workqueue code guarantees
			 * non-reentrancy of work items: if a work item with
			 * the same address and work function is queued twice,
			 * the second execution is blocked until the first one
			 * finishes. A work item may be freed and recycled
			 * with the same work function; the workqueue code
			 * assumes that the original work item cannot depend
			 * on the recycled work item in that case.
			 *
			 * Note that different types of Btrfs work can depend
			 * on each other, and one type of work on one Btrfs
			 * filesystem may even depend on the same type of work
			 * on another Btrfs filesystem (e.g. via a loop
			 * device). Therefore, we must not allow the current
			 * work item to be recycled until it is really done,
			 * so freeing it is deferred until after the loop.
			 */
			free_self = true;
		} else {
			work->ordered_func(work, true);
			/* NB: work must not be dereferenced past this point. */
			trace_btrfs_all_work_done(wq->fs_info, work);
		}
	}
	/* ... unlock; if free_self, call self->ordered_func(self, true) last ... */
}
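run_ordered_work() invokes each item's ordered_func twice: with false for the in-order completion step (while the item is still on the list acting as a barrier), and with true as the final free step. Below is a minimal sketch of a callback honoring that two-phase convention; struct example_job and both function names are hypothetical, not part of the btrfs code.

struct example_job {
	struct btrfs_work work;	/* embedded work item */
	u64 seq;		/* example payload filled by the work function */
};

static void example_ordered(struct btrfs_work *work, bool do_free)
{
	struct example_job *job = container_of(work, struct example_job, work);

	if (!do_free) {
		/* In-order phase: runs strictly in queueing order. */
		pr_debug("job seq %llu completed in order\n", job->seq);
		return;
	}
	/* Free phase: final call; the item must not be touched afterwards. */
	kfree(job);
}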
static void btrfs_work_helper(struct work_struct *normal_work)
{
	struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
					       normal_work);
	struct btrfs_workqueue *wq = work->wq;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func(), if it has no ordered_func(..., true) to
	 *    free, since the struct is freed in work->func();
	 * 2) after setting WORK_DONE_BIT, since the work may be freed in
	 *    other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		/*
		 * Ensures all memory accesses done in the work function are
		 * ordered before setting WORK_DONE_BIT, so that the thread
		 * which is going to execute the ordered work sees them.
		 */
		smp_mb__before_atomic();
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq, work);
	} else {
		/* NB: work must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, work);
	}
}
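Distilled from btrfs_work_helper() and run_ordered_work() above, the handshake on WORK_DONE_BIT is a standard publish/consume barrier pairing; set_bit() alone is not an ordering operation on all architectures, which is why the explicit barriers are needed:

/* Publisher side (btrfs_work_helper), after work->func(work) returns: */
smp_mb__before_atomic();	/* order the work's stores before the bit */
set_bit(WORK_DONE_BIT, &work->flags);

/* Consumer side (run_ordered_work), before using the work's results: */
if (!test_bit(WORK_DONE_BIT, &work->flags))
	return;			/* not published yet */
smp_rmb();			/* order later loads after reading the bit */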
void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
		     btrfs_ordered_func_t ordered_func)
{
	work->func = func;
	work->ordered_func = ordered_func;
	INIT_WORK(&work->normal_work, btrfs_work_helper);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}
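btrfs_init_work() only wires up the callbacks and the list head; the usual pattern in callers is to embed struct btrfs_work in a larger context object and recover it with container_of() inside the callbacks. A minimal sketch of the normal (concurrent) callback, reusing the hypothetical struct example_job from the sketch above:

static void example_func(struct btrfs_work *work)
{
	struct example_job *job = container_of(work, struct example_job, work);

	/*
	 * Concurrent phase: may run in parallel with other queued items.
	 * Values stored in job here are visible to example_ordered()
	 * thanks to the WORK_DONE_BIT barrier pairing shown earlier.
	 */
	job->seq = ktime_get_ns();
}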
void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work)
{
	work->wq = wq;
	if (work->ordered_func) {
		/* ... append under wq->list_lock ... */
		list_add_tail(&work->ordered_list, &wq->ordered_list);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}
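Putting it together: because btrfs_queue_work() appends the item to wq->ordered_list before calling queue_work(), the ordered callbacks run in exact queueing order even though the func callbacks may execute concurrently. A hedged end-to-end sketch, again using the hypothetical example_job and callbacks from above:

struct example_job *job = kmalloc(sizeof(*job), GFP_NOFS);

if (!job)
	return -ENOMEM;	/* caller-specific error handling */
job->seq = 0;
btrfs_init_work(&job->work, example_func, example_ordered);
btrfs_queue_work(wq, &job->work);
/*
 * Ownership has passed to the workqueue: example_ordered(..., true) is
 * the last user of job and frees it, so job must not be touched here.
 */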