/*	$NetBSD: linux_work.c,v 1.1 2016/02/24 22:04:15 skrll Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.1 2016/02/24 22:04:15 skrll Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/workqueue.h>
#include <sys/cpu.h>

#include <machine/lock.h>

#include <linux/workqueue.h>
/* XXX Kludge until we sync with HEAD. */
#if DIAGNOSTIC
#define	__diagused
#else
#define	__diagused	__unused
#endif

struct workqueue_struct {
	struct workqueue	*wq_workqueue;

	/* XXX The following should all be per-CPU. */
	kmutex_t		wq_lock;

	/*
	 * Condvar for when any state related to this workqueue
	 * changes.  XXX Could split this into multiple condvars for
	 * different purposes, but whatever...
	 */
	kcondvar_t		wq_cv;

	TAILQ_HEAD(, delayed_work) wq_delayed;
	struct work_struct	*wq_current_work;
};

static void	linux_work_lock_init(struct work_struct *);
static void	linux_work_lock(struct work_struct *);
static void	linux_work_unlock(struct work_struct *);
static bool	linux_work_locked(struct work_struct *) __diagused;

static void	linux_wq_barrier(struct work_struct *);

static void	linux_wait_for_cancelled_work(struct work_struct *);
static void	linux_wait_for_invoked_work(struct work_struct *);
static void	linux_worker(struct work *, void *);

static void	linux_cancel_delayed_work_callout(struct delayed_work *, bool);
static void	linux_wait_for_delayed_cancelled_work(struct delayed_work *);
static void	linux_worker_intr(void *);

struct workqueue_struct	*system_wq;

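/*
 * linux_workqueue_init: Create the global system_wq that
 * schedule_work and schedule_delayed_work feed into.  Returns 0 on
 * success or ENOMEM on failure.
 */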
int
linux_workqueue_init(void)
{

	system_wq = alloc_ordered_workqueue("lnxsyswq", 0);
	if (system_wq == NULL)
		return ENOMEM;

	return 0;
}

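/*
 * linux_workqueue_fini: Destroy system_wq, cancelling any remaining
 * delayed work and draining the queue first.
 */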
void
linux_workqueue_fini(void)
{
	destroy_workqueue(system_wq);
	system_wq = NULL;
}

/*
 * Workqueues
 */

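/*
 * alloc_ordered_workqueue: Create a workqueue backed by a single
 * NetBSD workqueue(9) worker thread at IPL_VM.  Only linux_flags == 0
 * is supported.  Returns NULL on failure.
 */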
struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int linux_flags)
{
	struct workqueue_struct *wq;
	int flags = WQ_MPSAFE;
	int error;

	KASSERT(linux_flags == 0);

	wq = kmem_alloc(sizeof(*wq), KM_SLEEP);
	error = workqueue_create(&wq->wq_workqueue, name, &linux_worker,
	    wq, PRI_NONE, IPL_VM, flags);
	if (error) {
		kmem_free(wq, sizeof(*wq));
		return NULL;
	}

	mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&wq->wq_cv, name);
	TAILQ_INIT(&wq->wq_delayed);
	wq->wq_current_work = NULL;

	return wq;
}

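/*
 * destroy_workqueue: Cancel all delayed work on wq and wait for it,
 * drain the underlying workqueue(9), and free wq.
 */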
void
destroy_workqueue(struct workqueue_struct *wq)
{

	/*
	 * Cancel all delayed work.
	 */
	for (;;) {
		struct delayed_work *dw;

		mutex_enter(&wq->wq_lock);
		if (TAILQ_EMPTY(&wq->wq_delayed)) {
			dw = NULL;
		} else {
			dw = TAILQ_FIRST(&wq->wq_delayed);
			TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		}
		mutex_exit(&wq->wq_lock);

		if (dw == NULL)
			break;

		cancel_delayed_work_sync(dw);
	}

	/*
	 * workqueue_destroy empties the queue; we need not wait for
	 * completion explicitly.  However, we can't destroy the
	 * condvar or mutex until this is done.
	 */
	workqueue_destroy(wq->wq_workqueue);
	KASSERT(wq->wq_current_work == NULL);
	wq->wq_workqueue = NULL;

	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);

	kmem_free(wq, sizeof(*wq));
}

/*
 * Flush
 *
 * Note: This doesn't cancel or wait for delayed work.  This seems to
 * match what Linux does (or, doesn't do).
 */

void
flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}

struct wq_flush_work {
	struct work_struct	wqfw_work;
	struct wq_flush		*wqfw_flush;
};

struct wq_flush {
	kmutex_t	wqf_lock;
	kcondvar_t	wqf_cv;
	unsigned int	wqf_n;
};

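/*
 * flush_work: We can't wait for one work item in isolation, so flush
 * the whole queue it was last put on.  XXX Reads work->w_wq without
 * the work lock, so this relies on the caller knowing the work won't
 * be moved to another queue concurrently.
 */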
void
flush_work(struct work_struct *work)
{
	struct workqueue_struct *const wq = work->w_wq;

	if (wq != NULL)
		flush_workqueue(wq);
}

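/*
 * flush_workqueue: Enqueue a barrier work item and wait for it to
 * complete.  The queue is served by a single worker thread, so once
 * the barrier runs, everything enqueued before it has finished.  The
 * per-CPU branch below is disabled until per-CPU Linux workqueues
 * work.
 */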
void
flush_workqueue(struct workqueue_struct *wq)
{
	static const struct wq_flush zero_wqf;
	struct wq_flush wqf = zero_wqf;

	mutex_init(&wqf.wqf_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wqf.wqf_cv, "lnxwflsh");

	if (1) {
		struct wq_flush_work *const wqfw = kmem_zalloc(sizeof(*wqfw),
		    KM_SLEEP);

		wqf.wqf_n = 1;
		wqfw->wqfw_flush = &wqf;
		INIT_WORK(&wqfw->wqfw_work, &linux_wq_barrier);
		wqfw->wqfw_work.w_wq = wq;
		wqfw->wqfw_work.w_state = WORK_PENDING;
		workqueue_enqueue(wq->wq_workqueue, &wqfw->wqfw_work.w_wk,
		    NULL);
	} else {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;
		struct wq_flush_work *wqfw;

		panic("per-CPU Linux workqueues don't work yet!");

		wqf.wqf_n = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			wqfw = kmem_zalloc(sizeof(*wqfw), KM_SLEEP);
			mutex_enter(&wqf.wqf_lock);
			wqf.wqf_n++;
			mutex_exit(&wqf.wqf_lock);
			wqfw->wqfw_flush = &wqf;
			INIT_WORK(&wqfw->wqfw_work, &linux_wq_barrier);
			wqfw->wqfw_work.w_state = WORK_PENDING;
			wqfw->wqfw_work.w_wq = wq;
			workqueue_enqueue(wq->wq_workqueue,
			    &wqfw->wqfw_work.w_wk, ci);
		}
	}

	mutex_enter(&wqf.wqf_lock);
	while (0 < wqf.wqf_n)
		cv_wait(&wqf.wqf_cv, &wqf.wqf_lock);
	mutex_exit(&wqf.wqf_lock);

	cv_destroy(&wqf.wqf_cv);
	mutex_destroy(&wqf.wqf_lock);
}

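/*
 * linux_wq_barrier: Worker-side half of flush_workqueue.  Decrement
 * the flush count, wake the flusher when it reaches zero, and free
 * the barrier work item.
 */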
static void
linux_wq_barrier(struct work_struct *work)
{
	struct wq_flush_work *const wqfw = container_of(work,
	    struct wq_flush_work, wqfw_work);
	struct wq_flush *const wqf = wqfw->wqfw_flush;

	mutex_enter(&wqf->wqf_lock);
	if (--wqf->wqf_n == 0)
		cv_broadcast(&wqf->wqf_cv);
	mutex_exit(&wqf->wqf_lock);

	kmem_free(wqfw, sizeof(*wqfw));
}

/*
 * Work locking
 *
 * We use __cpu_simple_lock(9) rather than mutex(9) because Linux code
 * does not destroy work, so there is nowhere to call mutex_destroy.
 *
 * XXX This is getting out of hand...  Really, work items shouldn't
 * have locks in them at all; instead the workqueues should.
 */

static void
linux_work_lock_init(struct work_struct *work)
{

	__cpu_simple_lock_init(&work->w_lock);
}

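/*
 * linux_work_lock/unlock: Take the per-work simple lock, raising the
 * SPL and adjusting ci_mtx_count the same way a spin mutex at IPL_VM
 * would, so holding it is safe against the worker and blocks kernel
 * preemption.
 */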
static void
linux_work_lock(struct work_struct *work)
{
	struct cpu_info *ci;
	int cnt, s;

	/* XXX Copypasta of MUTEX_SPIN_SPLRAISE. */
	s = splvm();
	ci = curcpu();
	cnt = ci->ci_mtx_count--;
	__insn_barrier();
	if (cnt == 0)
		ci->ci_mtx_oldspl = s;

	__cpu_simple_lock(&work->w_lock);
}

static void
linux_work_unlock(struct work_struct *work)
{
	struct cpu_info *ci;
	int s;

	__cpu_simple_unlock(&work->w_lock);

	/* XXX Copypasta of MUTEX_SPIN_SPLRESTORE. */
	ci = curcpu();
	s = ci->ci_mtx_oldspl;
	__insn_barrier();
	if (++ci->ci_mtx_count == 0)
		splx(s);
}

static bool __diagused
linux_work_locked(struct work_struct *work)
{
	return __SIMPLELOCK_LOCKED_P(&work->w_lock);
}

/*
 * Work
 */

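/*
 * Example usage (hypothetical caller; my_work and my_task are
 * illustrative names, not part of this file):
 *
 *	static struct work_struct my_work;
 *
 *	static void
 *	my_task(struct work_struct *work)
 *	{
 *		...
 *	}
 *
 *	INIT_WORK(&my_work, &my_task);
 *	schedule_work(&my_work);	(or queue_work(wq, &my_work))
 *	...
 *	cancel_work_sync(&my_work);
 */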
void
INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
{

	linux_work_lock_init(work);
	work->w_state = WORK_IDLE;
	work->w_wq = NULL;
	work->w_fn = fn;
}

bool
schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

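/*
 * queue_work: Put work on wq unless it is already queued or
 * cancelled.  Returns true iff the work was newly queued.  Panics if
 * the work is currently set up as delayed work.
 */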
bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	/* True if we put it on the queue, false if it was already there. */
	bool newly_queued;

	KASSERT(wq != NULL);

	linux_work_lock(work);
	switch (work->w_state) {
	case WORK_IDLE:
	case WORK_INVOKED:
		work->w_state = WORK_PENDING;
		work->w_wq = wq;
		workqueue_enqueue(wq->wq_workqueue, &work->w_wk, NULL);
		newly_queued = true;
		break;

	case WORK_DELAYED:
		panic("queue_work(delayed work %p)", work);
		break;

	case WORK_PENDING:
		KASSERT(work->w_wq == wq);
		newly_queued = false;
		break;

	case WORK_CANCELLED:
		newly_queued = false;
		break;

	case WORK_DELAYED_CANCELLED:
		panic("queue_work(delayed work %p)", work);
		break;

	default:
		panic("work %p in bad state: %d", work, (int)work->w_state);
		break;
	}
	linux_work_unlock(work);

	return newly_queued;
}

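/*
 * cancel_work_sync: Cancel work and wait until it is idle.  Returns
 * true iff the work was queued and we kept it from running; if it was
 * already running, just wait for it to finish and return false.
 */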
bool
cancel_work_sync(struct work_struct *work)
{
	bool cancelled_p = false;

	linux_work_lock(work);
	switch (work->w_state) {
	case WORK_IDLE:		/* Nothing to do. */
		break;

	case WORK_DELAYED:
		panic("cancel_work_sync(delayed work %p)", work);
		break;

	case WORK_PENDING:
		work->w_state = WORK_CANCELLED;
		linux_wait_for_cancelled_work(work);
		cancelled_p = true;
		break;

	case WORK_INVOKED:
		linux_wait_for_invoked_work(work);
		break;

	case WORK_CANCELLED:	/* Already done. */
		break;

	case WORK_DELAYED_CANCELLED:
		panic("cancel_work_sync(delayed work %p)", work);
		break;

	default:
		panic("work %p in bad state: %d", work, (int)work->w_state);
		break;
	}
	linux_work_unlock(work);

	return cancelled_p;
}

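/*
 * linux_wait_for_cancelled_work: Sleep until the worker has seen the
 * WORK_CANCELLED state and returned the work to idle.  Called and
 * returns with the work lock held; drops it around the cv_wait.
 */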
static void
linux_wait_for_cancelled_work(struct work_struct *work)
{
	struct workqueue_struct *wq;

	KASSERT(linux_work_locked(work));
	KASSERT(work->w_state == WORK_CANCELLED);

	wq = work->w_wq;
	do {
		mutex_enter(&wq->wq_lock);
		linux_work_unlock(work);
		cv_wait(&wq->wq_cv, &wq->wq_lock);
		mutex_exit(&wq->wq_lock);
		linux_work_lock(work);
	} while ((work->w_state == WORK_CANCELLED) && (work->w_wq == wq));
}

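/*
 * linux_wait_for_invoked_work: Sleep until the worker currently
 * running this work item has finished it.  Called and returns with
 * the work lock held; drops it around the cv_wait.
 */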
static void
linux_wait_for_invoked_work(struct work_struct *work)
{
	struct workqueue_struct *wq;

	KASSERT(linux_work_locked(work));
	KASSERT(work->w_state == WORK_INVOKED);

	wq = work->w_wq;
	mutex_enter(&wq->wq_lock);
	linux_work_unlock(work);
	while (wq->wq_current_work == work)
		cv_wait(&wq->wq_cv, &wq->wq_lock);
	mutex_exit(&wq->wq_lock);

	linux_work_lock(work);	/* XXX needless relock */
}

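/*
 * linux_worker: Thread callback for the underlying workqueue(9).
 * Runs pending work, or, for work cancelled while still on the queue,
 * returns it to idle and wakes any cancellers.
 */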
static void
linux_worker(struct work *wk, void *arg)
{
	struct work_struct *const work = container_of(wk, struct work_struct,
	    w_wk);
	struct workqueue_struct *const wq = arg;

	linux_work_lock(work);
	switch (work->w_state) {
	case WORK_IDLE:
		panic("idle work %p got queued: %p", work, wq);
		break;

	case WORK_DELAYED:
		panic("delayed work %p got queued: %p", work, wq);
		break;

	case WORK_PENDING:
		KASSERT(work->w_wq == wq);

		/* Get ready to invoke this one. */
		mutex_enter(&wq->wq_lock);
		work->w_state = WORK_INVOKED;
		KASSERT(wq->wq_current_work == NULL);
		wq->wq_current_work = work;
		mutex_exit(&wq->wq_lock);

		/* Unlock it and do it.  Can't use work after this. */
		linux_work_unlock(work);
		(*work->w_fn)(work);

		/* All done.  Notify anyone waiting for completion. */
		mutex_enter(&wq->wq_lock);
		KASSERT(wq->wq_current_work == work);
		wq->wq_current_work = NULL;
		cv_broadcast(&wq->wq_cv);
		mutex_exit(&wq->wq_lock);
		return;

	case WORK_INVOKED:
		panic("invoked work %p got requeued: %p", work, wq);
		break;

	case WORK_CANCELLED:
		KASSERT(work->w_wq == wq);

		/* Return to idle; notify anyone waiting for cancellation. */
		mutex_enter(&wq->wq_lock);
		work->w_state = WORK_IDLE;
		work->w_wq = NULL;
		cv_broadcast(&wq->wq_cv);
		mutex_exit(&wq->wq_lock);
		break;

	case WORK_DELAYED_CANCELLED:
531 | panic("cancelled delayed work %p got uqeued: %p" , work, wq); |
		break;

	default:
		panic("work %p in bad state: %d", work, (int)work->w_state);
		break;
	}
	linux_work_unlock(work);
}

/*
 * Delayed work
 */

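/*
 * Example usage (hypothetical caller; my_dw and my_task are
 * illustrative names, and mstohz(9) is one way to convert
 * milliseconds to the tick count these functions expect):
 *
 *	static struct delayed_work my_dw;
 *
 *	INIT_DELAYED_WORK(&my_dw, &my_task);
 *	schedule_delayed_work(&my_dw, mstohz(100));
 *	...
 *	cancel_delayed_work_sync(&my_dw);
 */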
void
INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
{
	INIT_WORK(&dw->work, fn);
}

bool
schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
{
	return queue_delayed_work(system_wq, dw, ticks);
}

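/*
 * queue_delayed_work: Schedule dw to run on wq after the given number
 * of ticks, or immediately if ticks is zero.  If its timer is already
 * ticking, leave the existing deadline alone, as Linux does.  Returns
 * true iff the work was newly scheduled.
 */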
bool
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	bool newly_queued;

	KASSERT(wq != NULL);

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:
	case WORK_INVOKED:
		if (ticks == 0) {
			/* Skip the delay and queue it now. */
			dw->work.w_state = WORK_PENDING;
			dw->work.w_wq = wq;
			workqueue_enqueue(wq->wq_workqueue, &dw->work.w_wk,
			    NULL);
		} else {
			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
			callout_reset(&dw->dw_callout, ticks,
			    &linux_worker_intr, dw);
			dw->work.w_state = WORK_DELAYED;
			dw->work.w_wq = wq;
			mutex_enter(&wq->wq_lock);
			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
			mutex_exit(&wq->wq_lock);
		}
		newly_queued = true;
		break;

	case WORK_DELAYED:
		/*
		 * Timer is already ticking.  Leave it to time out
		 * whenever it was going to time out, as Linux does --
		 * neither speed it up nor postpone it.
		 */
		newly_queued = false;
		break;

	case WORK_PENDING:
		KASSERT(dw->work.w_wq == wq);
		newly_queued = false;
		break;

	case WORK_CANCELLED:
	case WORK_DELAYED_CANCELLED:
		/* XXX Wait for cancellation and then queue? */
		newly_queued = false;
		break;

	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return newly_queued;
}

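/*
 * mod_delayed_work: Like queue_delayed_work, except that if the timer
 * is already ticking, reschedule it for the new deadline rather than
 * leaving it alone.  Returns true iff an existing timer was modified.
 */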
bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	bool timer_modified;

	KASSERT(wq != NULL);

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:
	case WORK_INVOKED:
		if (ticks == 0) {
			/* Skip the delay and queue it now. */
			dw->work.w_state = WORK_PENDING;
			dw->work.w_wq = wq;
			workqueue_enqueue(wq->wq_workqueue, &dw->work.w_wk,
			    NULL);
		} else {
			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
			callout_reset(&dw->dw_callout, ticks,
			    &linux_worker_intr, dw);
			dw->work.w_state = WORK_DELAYED;
			dw->work.w_wq = wq;
			mutex_enter(&wq->wq_lock);
			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
			mutex_exit(&wq->wq_lock);
		}
		timer_modified = false;
		break;

	case WORK_DELAYED:
		/*
		 * Timer is already ticking.  Reschedule it.
		 */
		callout_schedule(&dw->dw_callout, ticks);
		timer_modified = true;
		break;

	case WORK_PENDING:
		KASSERT(dw->work.w_wq == wq);
		timer_modified = false;
		break;

	case WORK_CANCELLED:
	case WORK_DELAYED_CANCELLED:
		/* XXX Wait for cancellation and then queue? */
		timer_modified = false;
		break;

	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return timer_modified;
}

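/*
 * cancel_delayed_work: Cancel dw without waiting.  Returns true iff
 * the work was scheduled or queued and had not yet begun to run.
 */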
bool
cancel_delayed_work(struct delayed_work *dw)
{
	bool cancelled_p = false;

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:		/* Nothing to do. */
		break;

	case WORK_DELAYED:
		dw->work.w_state = WORK_DELAYED_CANCELLED;
		linux_cancel_delayed_work_callout(dw, false);
		cancelled_p = true;
		break;

	case WORK_PENDING:
		dw->work.w_state = WORK_CANCELLED;
		cancelled_p = true;
		break;

	case WORK_INVOKED:	/* Don't wait! */
		break;

	case WORK_CANCELLED:	/* Already done. */
	case WORK_DELAYED_CANCELLED:
		break;

	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return cancelled_p;
}

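/*
 * cancel_delayed_work_sync: Cancel dw and wait until it is idle,
 * including waiting for its callout to stop and for any concurrent
 * invocation or cancellation to complete.
 */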
bool
cancel_delayed_work_sync(struct delayed_work *dw)
{
	bool cancelled_p = false;

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:		/* Nothing to do. */
		break;

	case WORK_DELAYED:
		dw->work.w_state = WORK_DELAYED_CANCELLED;
		linux_cancel_delayed_work_callout(dw, true);
		cancelled_p = true;
		break;

	case WORK_PENDING:
		dw->work.w_state = WORK_CANCELLED;
		linux_wait_for_cancelled_work(&dw->work);
		cancelled_p = true;
		break;

	case WORK_INVOKED:
		linux_wait_for_invoked_work(&dw->work);
		break;

	case WORK_CANCELLED:	/* Already done. */
		break;

	case WORK_DELAYED_CANCELLED:
		linux_wait_for_delayed_cancelled_work(dw);
		break;

	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return cancelled_p;
}

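/*
 * linux_cancel_delayed_work_callout: Stop dw's callout, waiting for
 * it if wait is true.  If we beat the callout, take dw off the
 * queue's delayed list, destroy the callout, and return the work to
 * idle ourselves; otherwise the callout has already begun and will
 * clean up after itself.
 */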
static void
linux_cancel_delayed_work_callout(struct delayed_work *dw, bool wait)
{
	bool fired_p;

	KASSERT(linux_work_locked(&dw->work));
	KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);

	if (wait) {
		/*
		 * We unlock, halt, and then relock, rather than
		 * passing an interlock to callout_halt, for two
		 * reasons:
		 *
		 * (1) The work lock is not a mutex(9), so we can't use it.
		 * (2) The WORK_DELAYED_CANCELLED state serves as an interlock.
		 */
		linux_work_unlock(&dw->work);
		fired_p = callout_halt(&dw->dw_callout, NULL);
		linux_work_lock(&dw->work);
	} else {
		fired_p = callout_stop(&dw->dw_callout);
	}

	/*
	 * fired_p means we didn't cancel the callout, so it must have
	 * already begun and will clean up after itself.
	 *
	 * !fired_p means we cancelled it so we have to clean up after
	 * it.  Nobody else should have changed the state in that case.
	 */
	if (!fired_p) {
		struct workqueue_struct *wq;

		KASSERT(linux_work_locked(&dw->work));
		KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);

		wq = dw->work.w_wq;
		mutex_enter(&wq->wq_lock);
		TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		callout_destroy(&dw->dw_callout);
		dw->work.w_state = WORK_IDLE;
		dw->work.w_wq = NULL;
		cv_broadcast(&wq->wq_cv);
		mutex_exit(&wq->wq_lock);
	}
}

static void
linux_wait_for_delayed_cancelled_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;

	KASSERT(linux_work_locked(&dw->work));
	KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);

	wq = dw->work.w_wq;
	do {
		mutex_enter(&wq->wq_lock);
		linux_work_unlock(&dw->work);
		cv_wait(&wq->wq_cv, &wq->wq_lock);
		mutex_exit(&wq->wq_lock);
		linux_work_lock(&dw->work);
	} while ((dw->work.w_state == WORK_DELAYED_CANCELLED) &&
	    (dw->work.w_wq == wq));
}

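/*
 * linux_worker_intr: Callout handler for delayed work.  Hand the work
 * to the workqueue, or, if it was cancelled while the timer was
 * ticking, return it to idle and wake any cancellers.  Either way the
 * callout itself is finished, so destroy it.
 */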
static void
linux_worker_intr(void *arg)
{
	struct delayed_work *dw = arg;
	struct workqueue_struct *wq;

	linux_work_lock(&dw->work);

	KASSERT((dw->work.w_state == WORK_DELAYED) ||
	    (dw->work.w_state == WORK_DELAYED_CANCELLED));

	wq = dw->work.w_wq;
	mutex_enter(&wq->wq_lock);

	/* Queue the work, or return it to idle and alert any cancellers. */
	if (__predict_true(dw->work.w_state == WORK_DELAYED)) {
		dw->work.w_state = WORK_PENDING;
		workqueue_enqueue(dw->work.w_wq->wq_workqueue, &dw->work.w_wk,
		    NULL);
	} else {
		KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);
		dw->work.w_state = WORK_IDLE;
		dw->work.w_wq = NULL;
		cv_broadcast(&wq->wq_cv);
	}

	/* Either way, the callout is done. */
	TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
	callout_destroy(&dw->dw_callout);

	mutex_exit(&wq->wq_lock);
	linux_work_unlock(&dw->work);
}