Lines matching refs: work (kthread worker implementation, kernel/kthread.c)
973 struct kthread_work *work; in kthread_worker_fn() local
996 work = NULL; in kthread_worker_fn()
999 work = list_first_entry(&worker->work_list, in kthread_worker_fn()
1001 list_del_init(&work->node); in kthread_worker_fn()
1003 worker->current_work = work; in kthread_worker_fn()
1006 if (work) { in kthread_worker_fn()
1007 kthread_work_func_t func = work->func; in kthread_worker_fn()
1009 trace_sched_kthread_work_execute_start(work); in kthread_worker_fn()
1010 work->func(work); in kthread_worker_fn()
1015 trace_sched_kthread_work_execute_end(work, func); in kthread_worker_fn()
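These lines are the heart of kthread_worker_fn(), the main loop of every kthread worker: under the worker lock it pops the first kthread_work off worker->work_list, detaches its node, records it as worker->current_work, and then invokes work->func(work) between the execute_start/execute_end tracepoints. Drivers normally never call this function directly; kthread_create_worker() spawns a thread that runs it. A minimal sketch of standing up such a worker (the mydrv_* names here and below are hypothetical; the kthread_* calls are the upstream API):

	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/printk.h>
	#include <linux/sched.h>

	static struct kthread_worker *mydrv_worker;	/* hypothetical example */
	static struct kthread_work mydrv_work;

	/* Runs in the worker thread, i.e. inside kthread_worker_fn()'s loop. */
	static void mydrv_work_fn(struct kthread_work *work)
	{
		pr_info("mydrv: running in %s\n", current->comm);
	}

	static int mydrv_start(void)
	{
		/* Spawns a kthread whose body is kthread_worker_fn(). */
		mydrv_worker = kthread_create_worker(0, "mydrv");
		if (IS_ERR(mydrv_worker))
			return PTR_ERR(mydrv_worker);
		kthread_init_work(&mydrv_work, mydrv_work_fn);
		return 0;
	}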
1141 struct kthread_work *work) in queuing_blocked() argument
1145 return !list_empty(&work->node) || work->canceling; in queuing_blocked()
1149 struct kthread_work *work) in kthread_insert_work_sanity_check() argument
1152 WARN_ON_ONCE(!list_empty(&work->node)); in kthread_insert_work_sanity_check()
1154 WARN_ON_ONCE(work->worker && work->worker != worker); in kthread_insert_work_sanity_check()
1159 struct kthread_work *work, in kthread_insert_work() argument
1162 kthread_insert_work_sanity_check(worker, work); in kthread_insert_work()
1164 trace_sched_kthread_work_queue_work(worker, work); in kthread_insert_work()
1166 list_add_tail(&work->node, pos); in kthread_insert_work()
1167 work->worker = worker; in kthread_insert_work()
1185 struct kthread_work *work) in kthread_queue_work() argument
1191 if (!queuing_blocked(worker, work)) { in kthread_queue_work()
1192 kthread_insert_work(worker, work, &worker->work_list); in kthread_queue_work()
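kthread_queue_work() is the public entry point for the helpers above: it takes the worker lock, lets queuing_blocked() reject a work that is already pending (!list_empty(&work->node)) or mid-cancel (work->canceling), and otherwise hands off to kthread_insert_work(), which links the node at the requested position, stamps work->worker, and wakes the worker thread. One visible consequence: queueing the same work twice before it runs is a no-op, and the bool return reports it. Continuing the sketch above:

	/* kthread_queue_work() returns false when queuing_blocked() rejects
	 * the request, i.e. the work is still pending or a cancel is in
	 * flight on it. */
	static void mydrv_kick(void)
	{
		if (!kthread_queue_work(mydrv_worker, &mydrv_work))
			pr_debug("mydrv: work already pending, not requeued\n");
	}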
1211 struct kthread_work *work = &dwork->work; in kthread_delayed_work_timer_fn() local
1212 struct kthread_worker *worker = work->worker; in kthread_delayed_work_timer_fn()
1224 WARN_ON_ONCE(work->worker != worker); in kthread_delayed_work_timer_fn()
1227 WARN_ON_ONCE(list_empty(&work->node)); in kthread_delayed_work_timer_fn()
1228 list_del_init(&work->node); in kthread_delayed_work_timer_fn()
1229 if (!work->canceling) in kthread_delayed_work_timer_fn()
1230 kthread_insert_work(worker, work, &worker->work_list); in kthread_delayed_work_timer_fn()
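kthread_delayed_work_timer_fn() is the timer callback behind delayed work: when the timer fires it takes the worker lock, removes the work from the worker's delayed_work_list, and, unless a cancel is in flight (work->canceling), moves it onto work_list via kthread_insert_work(). Users never install this callback by hand; kthread_init_delayed_work() wires it up. A sketch of the initialization, again with hypothetical mydrv_* names:

	static struct kthread_delayed_work mydrv_dwork;	/* hypothetical */

	static void mydrv_setup_delayed(void)
	{
		/* Binds the handler to the work and installs
		 * kthread_delayed_work_timer_fn as the timer callback. */
		kthread_init_delayed_work(&mydrv_dwork, mydrv_work_fn);
	}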
1241 struct kthread_work *work = &dwork->work; in __kthread_queue_delayed_work() local
1252 kthread_insert_work(worker, work, &worker->work_list); in __kthread_queue_delayed_work()
1257 kthread_insert_work_sanity_check(worker, work); in __kthread_queue_delayed_work()
1259 list_add(&work->node, &worker->delayed_work_list); in __kthread_queue_delayed_work()
1260 work->worker = worker; in __kthread_queue_delayed_work()
1284 struct kthread_work *work = &dwork->work; in kthread_queue_delayed_work() local
1290 if (!queuing_blocked(worker, work)) { in kthread_queue_delayed_work()
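kthread_queue_delayed_work() applies the same queuing_blocked() guard and then calls __kthread_queue_delayed_work(), which takes one of two paths: with delay == 0 the work goes straight onto work_list (line 1252 above); otherwise it is parked on delayed_work_list and the timer is armed to fire after delay jiffies. A usage sketch, assuming the worker and delayed work set up above:

	#include <linux/jiffies.h>

	static void mydrv_arm(void)
	{
		/* Parks the work on delayed_work_list and arms the timer;
		 * a zero delay degenerates to an immediate queue. Returns
		 * false if the work is already pending. */
		kthread_queue_delayed_work(mydrv_worker, &mydrv_dwork,
					   msecs_to_jiffies(100));
	}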
1301 struct kthread_work work; member
1305 static void kthread_flush_work_fn(struct kthread_work *work) in kthread_flush_work_fn() argument
1308 container_of(work, struct kthread_flush_work, work); in kthread_flush_work_fn()
1318 void kthread_flush_work(struct kthread_work *work) in kthread_flush_work() argument
1321 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), in kthread_flush_work()
1327 worker = work->worker; in kthread_flush_work()
1333 WARN_ON_ONCE(work->worker != worker); in kthread_flush_work()
1335 if (!list_empty(&work->node)) in kthread_flush_work()
1336 kthread_insert_work(worker, &fwork.work, work->node.next); in kthread_flush_work()
1337 else if (worker->current_work == work) in kthread_flush_work()
1338 kthread_insert_work(worker, &fwork.work, in kthread_flush_work()
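kthread_flush_work() waits for a work item to finish by queueing a sentinel: a kthread_flush_work wraps a kthread_work whose handler simply completes an on-stack completion. If the target is still queued, the sentinel is inserted immediately behind it (work->node.next); if it is currently executing (worker->current_work == work), the sentinel goes to the head of work_list so it runs as soon as the handler returns. The caller then blocks on the completion. A restatement of the sentinel, assembled from the lines above (the struct layout matches lines 1301 onward):

	#include <linux/completion.h>
	#include <linux/kthread.h>

	struct kthread_flush_work {
		struct kthread_work	work;
		struct completion	done;
	};

	static void kthread_flush_work_fn(struct kthread_work *work)
	{
		struct kthread_flush_work *fwork =
			container_of(work, struct kthread_flush_work, work);

		complete(&fwork->done);
	}

After inserting fwork.work, kthread_flush_work() drops the worker lock and waits on fwork.done, so the flush returns only once everything queued ahead of the sentinel has run.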
1357 static void kthread_cancel_delayed_work_timer(struct kthread_work *work, in kthread_cancel_delayed_work_timer() argument
1361 container_of(work, struct kthread_delayed_work, work); in kthread_cancel_delayed_work_timer()
1362 struct kthread_worker *worker = work->worker; in kthread_cancel_delayed_work_timer()
1370 work->canceling++; in kthread_cancel_delayed_work_timer()
1374 work->canceling--; in kthread_cancel_delayed_work_timer()
1390 static bool __kthread_cancel_work(struct kthread_work *work) in __kthread_cancel_work() argument
1396 if (!list_empty(&work->node)) { in __kthread_cancel_work()
1397 list_del_init(&work->node); in __kthread_cancel_work()
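Cancellation has two halves. kthread_cancel_delayed_work_timer() kills a pending timer, and it must drop the worker lock around the synchronous timer deletion because kthread_delayed_work_timer_fn() above takes the same lock; the elevated work->canceling count keeps the work from being requeued in that window. __kthread_cancel_work() then does the cheap part: unlink the node if the work is still sitting on a list. A reconstruction of the timer half around the lines shown (the raw spinlock type and the del_timer_sync() spelling, renamed timer_delete_sync() in newer trees, are assumptions about the kernel version):

	static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
						      unsigned long *flags)
	{
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/* The synchronous timer delete must run without worker->lock
		 * held: the timer callback takes that same lock. The raised
		 * canceling count blocks requeueing while the lock is down. */
		work->canceling++;
		raw_spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		raw_spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}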
1431 struct kthread_work *work = &dwork->work; in kthread_mod_delayed_work() local
1438 if (!work->worker) { in kthread_mod_delayed_work()
1444 WARN_ON_ONCE(work->worker != worker); in kthread_mod_delayed_work()
1458 kthread_cancel_delayed_work_timer(work, &flags); in kthread_mod_delayed_work()
1459 if (work->canceling) { in kthread_mod_delayed_work()
1464 ret = __kthread_cancel_work(work); in kthread_mod_delayed_work()
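kthread_mod_delayed_work() re-arms a delayed work to a new deadline: if the work was never queued (!work->worker) it falls through to a fresh queue; otherwise it kills the timer and, unless a concurrent cancel owns the work (work->canceling), unlinks the pending instance and requeues it with the new delay. The return value distinguishes "was pending and got moved" (true) from "was idle and simply got queued" (false). A typical watchdog-style use (sketch):

	/* Push the deadline out by a second each time we see activity;
	 * a true return means an earlier instance was still pending. */
	static void mydrv_pet_watchdog(void)
	{
		kthread_mod_delayed_work(mydrv_worker, &mydrv_dwork,
					 msecs_to_jiffies(1000));
	}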
1474 static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork) in __kthread_cancel_work_sync() argument
1476 struct kthread_worker *worker = work->worker; in __kthread_cancel_work_sync()
1485 WARN_ON_ONCE(work->worker != worker); in __kthread_cancel_work_sync()
1488 kthread_cancel_delayed_work_timer(work, &flags); in __kthread_cancel_work_sync()
1490 ret = __kthread_cancel_work(work); in __kthread_cancel_work_sync()
1492 if (worker->current_work != work) in __kthread_cancel_work_sync()
1499 work->canceling++; in __kthread_cancel_work_sync()
1501 kthread_flush_work(work); in __kthread_cancel_work_sync()
1503 work->canceling--; in __kthread_cancel_work_sync()
1527 bool kthread_cancel_work_sync(struct kthread_work *work) in kthread_cancel_work_sync() argument
1529 return __kthread_cancel_work_sync(work, false); in kthread_cancel_work_sync()
1544 return __kthread_cancel_work_sync(&dwork->work, true); in kthread_cancel_delayed_work_sync()
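__kthread_cancel_work_sync() combines all of the above: kill the timer (for delayed work), unlink a still-queued instance, and, if the work is currently executing (worker->current_work == work), bump work->canceling to block requeueing, drop the lock, and kthread_flush_work() it to completion before dropping the count. The two exported wrappers differ only in the is_dwork flag; both return true when an instance was actually pending. Typical teardown, continuing the sketch:

	static void mydrv_stop(void)
	{
		/* On return the handler is guaranteed not to be running
		 * and will not be requeued by a racing timer. */
		kthread_cancel_work_sync(&mydrv_work);
		kthread_cancel_delayed_work_sync(&mydrv_dwork);
	}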
1558 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), in kthread_flush_worker()
1562 kthread_queue_work(worker, &fwork.work); in kthread_flush_worker()
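kthread_flush_worker() drains the whole worker rather than a single item: it queues the same completion-based sentinel at the tail of work_list, so by the time the sentinel's handler runs, every work queued before the call has finished. It is the usual last step before tearing the worker down (sketch; note that kthread_destroy_worker() also flushes internally, so the explicit flush here is for illustration):

	static void mydrv_teardown(void)
	{
		kthread_flush_worker(mydrv_worker);	/* drain all queued work */
		kthread_destroy_worker(mydrv_worker);	/* stop and free the thread */
	}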