Index: sys/external/bsd/drm2/linux/linux_work.c
===================================================================
RCS file: /cvsroot/src/sys/external/bsd/drm2/linux/linux_work.c,v
retrieving revision 1.4
diff -p -u -r1.4 linux_work.c
--- sys/external/bsd/drm2/linux/linux_work.c	16 Jul 2014 20:59:58 -0000	1.4
+++ sys/external/bsd/drm2/linux/linux_work.c	25 Jul 2014 13:16:40 -0000
@@ -306,6 +306,22 @@ linux_work_locked(struct work_struct *wo
 {
 	return __SIMPLELOCK_LOCKED_P(&work->w_lock);
 }
+
+static int /* XXX enum */
+linux_work_state(struct work_struct *work)
+{
+
+	KASSERT(linux_work_locked(work));
+	return work->w_state;
+}
+
+static void
+linux_work_transition(struct work_struct *work, int state)
+{
+
+	KASSERT(linux_work_locked(work));
+	work->w_state = state;
+}
 /*
  * Work
  */
@@ -336,10 +352,10 @@ queue_work(struct workqueue_struct *wq,
 	KASSERT(wq != NULL);

 	linux_work_lock(work);
-	switch (work->w_state) {
+	switch (linux_work_state(work)) {
 	case WORK_IDLE:
 	case WORK_INVOKED:
-		work->w_state = WORK_PENDING;
+		linux_work_transition(work, WORK_PENDING);
 		work->w_wq = wq;
 		workqueue_enqueue(wq->wq_workqueue, &work->w_wk, NULL);
 		newly_queued = true;
@@ -363,7 +379,8 @@ queue_work(struct workqueue_struct *wq,
 		break;

 	default:
-		panic("work %p in bad state: %d", work, (int)work->w_state);
+		panic("work %p in bad state: %d", work,
+		    (int)linux_work_state(work));
 		break;
 	}
 	linux_work_unlock(work);
@@ -377,7 +394,7 @@ cancel_work_sync(struct work_struct *wor
 	bool cancelled_p = false;

 	linux_work_lock(work);
-	switch (work->w_state) {
+	switch (linux_work_state(work)) {
 	case WORK_IDLE:		/* Nothing to do.  */
 		break;

@@ -386,7 +403,7 @@ cancel_work_sync(struct work_struct *wor
 		break;

 	case WORK_PENDING:
-		work->w_state = WORK_CANCELLED;
+		linux_work_transition(work, WORK_CANCELLED);
 		linux_wait_for_cancelled_work(work);
 		cancelled_p = true;
 		break;
@@ -403,7 +420,8 @@ cancel_work_sync(struct work_struct *wor
 		break;

 	default:
-		panic("work %p in bad state: %d", work, (int)work->w_state);
+		panic("work %p in bad state: %d", work,
+		    (int)linux_work_state(work));
 		break;
 	}
 	linux_work_unlock(work);
@@ -417,7 +435,7 @@ linux_wait_for_cancelled_work(struct wor
 	struct workqueue_struct *wq;

 	KASSERT(linux_work_locked(work));
-	KASSERT(work->w_state == WORK_CANCELLED);
+	KASSERT(linux_work_state(work) == WORK_CANCELLED);

 	wq = work->w_wq;
 	do {
@@ -426,7 +444,8 @@ linux_wait_for_cancelled_work(struct wor
 		cv_wait(&wq->wq_cv, &wq->wq_lock);
 		mutex_exit(&wq->wq_lock);
 		linux_work_lock(work);
-	} while ((work->w_state == WORK_CANCELLED) && (work->w_wq == wq));
+	} while ((linux_work_state(work) == WORK_CANCELLED) &&
+	    (work->w_wq == wq));
 }

 static void
@@ -435,7 +454,7 @@ linux_wait_for_invoked_work(struct work_
 	struct workqueue_struct *wq;

 	KASSERT(linux_work_locked(work));
-	KASSERT(work->w_state == WORK_INVOKED);
+	KASSERT(linux_work_state(work) == WORK_INVOKED);

 	wq = work->w_wq;
 	mutex_enter(&wq->wq_lock);
@@ -455,7 +474,7 @@ linux_worker(struct work *wk, void *arg)
 	struct workqueue_struct *const wq = arg;

 	linux_work_lock(work);
-	switch (work->w_state) {
+	switch (linux_work_state(work)) {
 	case WORK_IDLE:
 		panic("idle work %p got queued: %p", work, wq);
 		break;
@@ -469,7 +488,7 @@ linux_worker(struct work *wk, void *arg)

 		/* Get ready to invoke this one.  */
 		mutex_enter(&wq->wq_lock);
-		work->w_state = WORK_INVOKED;
+		linux_work_transition(work, WORK_INVOKED);
 		KASSERT(wq->wq_current_work == NULL);
 		wq->wq_current_work = work;
 		mutex_exit(&wq->wq_lock);
@@ -495,7 +514,7 @@ linux_worker(struct work *wk, void *arg)

 		/* Return to idle; notify anyone waiting for cancellation.  */
 		mutex_enter(&wq->wq_lock);
-		work->w_state = WORK_IDLE;
+		linux_work_transition(work, WORK_IDLE);
 		work->w_wq = NULL;
 		cv_broadcast(&wq->wq_cv);
 		mutex_exit(&wq->wq_lock);
@@ -506,7 +525,8 @@ linux_worker(struct work *wk, void *arg)
 		break;

 	default:
-		panic("work %p in bad state: %d", work, (int)work->w_state);
+		panic("work %p in bad state: %d", work,
+		    (int)linux_work_state(work));
 		break;
 	}
 	linux_work_unlock(work);
@@ -537,12 +557,12 @@ queue_delayed_work(struct workqueue_stru
 	KASSERT(wq != NULL);

 	linux_work_lock(&dw->work);
-	switch (dw->work.w_state) {
+	switch (linux_work_state(&dw->work)) {
 	case WORK_IDLE:
 	case WORK_INVOKED:
 		if (ticks == 0) {
 			/* Skip the delay and queue it now.  */
-			dw->work.w_state = WORK_PENDING;
+			linux_work_transition(&dw->work, WORK_PENDING);
 			dw->work.w_wq = wq;
 			workqueue_enqueue(wq->wq_workqueue, &dw->work.w_wk,
 			    NULL);
@@ -550,7 +570,7 @@ queue_delayed_work(struct workqueue_stru
 			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
 			callout_reset(&dw->dw_callout, ticks,
 			    &linux_worker_intr, dw);
-			dw->work.w_state = WORK_DELAYED;
+			linux_work_transition(&dw->work, WORK_DELAYED);
 			dw->work.w_wq = wq;
 			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
 		}
@@ -579,7 +599,7 @@ queue_delayed_work(struct workqueue_stru

 	default:
 		panic("delayed work %p in bad state: %d", dw,
-		    (int)dw->work.w_state);
+		    (int)linux_work_state(&dw->work));
 		break;
 	}
 	linux_work_unlock(&dw->work);
@@ -596,12 +616,12 @@ mod_delayed_work(struct workqueue_struct
 	KASSERT(wq != NULL);

 	linux_work_lock(&dw->work);
-	switch (dw->work.w_state) {
+	switch (linux_work_state(&dw->work)) {
 	case WORK_IDLE:
 	case WORK_INVOKED:
 		if (ticks == 0) {
 			/* Skip the delay and queue it now.  */
-			dw->work.w_state = WORK_PENDING;
+			linux_work_transition(&dw->work, WORK_PENDING);
 			dw->work.w_wq = wq;
 			workqueue_enqueue(wq->wq_workqueue, &dw->work.w_wk,
 			    NULL);
@@ -609,7 +629,7 @@ mod_delayed_work(struct workqueue_struct
 			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
 			callout_reset(&dw->dw_callout, ticks,
 			    &linux_worker_intr, dw);
-			dw->work.w_state = WORK_DELAYED;
+			linux_work_transition(&dw->work, WORK_DELAYED);
 			dw->work.w_wq = wq;
 			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
 		}
@@ -637,7 +657,7 @@ mod_delayed_work(struct workqueue_struct

 	default:
 		panic("delayed work %p in bad state: %d", dw,
-		    (int)dw->work.w_state);
+		    (int)linux_work_state(&dw->work));
 		break;
 	}
 	linux_work_unlock(&dw->work);
@@ -651,18 +671,18 @@ cancel_delayed_work(struct delayed_work
 	bool cancelled_p = false;

 	linux_work_lock(&dw->work);
-	switch (dw->work.w_state) {
+	switch (linux_work_state(&dw->work)) {
 	case WORK_IDLE:		/* Nothing to do.  */
 		break;

 	case WORK_DELAYED:
-		dw->work.w_state = WORK_DELAYED_CANCELLED;
+		linux_work_transition(&dw->work, WORK_DELAYED_CANCELLED);
 		linux_cancel_delayed_work_callout(dw, false);
 		cancelled_p = true;
 		break;

 	case WORK_PENDING:
-		dw->work.w_state = WORK_CANCELLED;
+		linux_work_transition(&dw->work, WORK_CANCELLED);
 		cancelled_p = true;
 		break;

@@ -675,7 +695,7 @@ cancel_delayed_work(struct delayed_work

 	default:
 		panic("delayed work %p in bad state: %d", dw,
-		    (int)dw->work.w_state);
+		    (int)linux_work_state(&dw->work));
 		break;
 	}
 	linux_work_unlock(&dw->work);
@@ -689,18 +709,18 @@ cancel_delayed_work_sync(struct delayed_
 	bool cancelled_p = false;

 	linux_work_lock(&dw->work);
-	switch (dw->work.w_state) {
+	switch (linux_work_state(&dw->work)) {
 	case WORK_IDLE:		/* Nothing to do.  */
 		break;

 	case WORK_DELAYED:
-		dw->work.w_state = WORK_DELAYED_CANCELLED;
+		linux_work_transition(&dw->work, WORK_DELAYED_CANCELLED);
 		linux_cancel_delayed_work_callout(dw, true);
 		cancelled_p = true;
 		break;

 	case WORK_PENDING:
-		dw->work.w_state = WORK_CANCELLED;
+		linux_work_transition(&dw->work, WORK_CANCELLED);
 		linux_wait_for_cancelled_work(&dw->work);
 		cancelled_p = true;
 		break;
@@ -717,7 +737,7 @@ cancel_delayed_work_sync(struct delayed_

 	default:
 		panic("delayed work %p in bad state: %d", dw,
-		    (int)dw->work.w_state);
+		    (int)linux_work_state(&dw->work));
 		break;
 	}
 	linux_work_unlock(&dw->work);
@@ -731,7 +751,7 @@ linux_cancel_delayed_work_callout(struct
 	bool fired_p;

 	KASSERT(linux_work_locked(&dw->work));
-	KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);
+	KASSERT(linux_work_state(&dw->work) == WORK_DELAYED_CANCELLED);

 	if (wait) {
 		/*
@@ -760,13 +780,13 @@ linux_cancel_delayed_work_callout(struct
 		struct workqueue_struct *wq;

 		KASSERT(linux_work_locked(&dw->work));
-		KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);
+		KASSERT(linux_work_state(&dw->work) == WORK_DELAYED_CANCELLED);

 		wq = dw->work.w_wq;
 		mutex_enter(&wq->wq_lock);
 		TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
 		callout_destroy(&dw->dw_callout);
-		dw->work.w_state = WORK_IDLE;
+		linux_work_transition(&dw->work, WORK_IDLE);
 		dw->work.w_wq = NULL;
 		cv_broadcast(&wq->wq_cv);
 		mutex_exit(&wq->wq_lock);
@@ -779,7 +799,7 @@ linux_wait_for_delayed_cancelled_work(st
 	struct workqueue_struct *wq;

 	KASSERT(linux_work_locked(&dw->work));
-	KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);
+	KASSERT(linux_work_state(&dw->work) == WORK_DELAYED_CANCELLED);

 	wq = dw->work.w_wq;
 	do {
@@ -788,7 +808,7 @@ linux_wait_for_delayed_cancelled_work(st
 		cv_wait(&wq->wq_cv, &wq->wq_lock);
 		mutex_exit(&wq->wq_lock);
 		linux_work_lock(&dw->work);
-	} while ((dw->work.w_state == WORK_DELAYED_CANCELLED) &&
+	} while ((linux_work_state(&dw->work) == WORK_DELAYED_CANCELLED) &&
 	    (dw->work.w_wq == wq));
 }

@@ -800,20 +820,20 @@ linux_worker_intr(void *arg)

 	linux_work_lock(&dw->work);

-	KASSERT((dw->work.w_state == WORK_DELAYED) ||
-	    (dw->work.w_state == WORK_DELAYED_CANCELLED));
+	KASSERT((linux_work_state(&dw->work) == WORK_DELAYED) ||
+	    (linux_work_state(&dw->work) == WORK_DELAYED_CANCELLED));

 	wq = dw->work.w_wq;
 	mutex_enter(&wq->wq_lock);

 	/* Queue the work, or return it to idle and alert any cancellers.  */
-	if (__predict_true(dw->work.w_state == WORK_DELAYED)) {
-		dw->work.w_state = WORK_PENDING;
+	if (__predict_true(linux_work_state(&dw->work) == WORK_DELAYED)) {
+		linux_work_transition(&dw->work, WORK_PENDING);
 		workqueue_enqueue(dw->work.w_wq->wq_workqueue, &dw->work.w_wk,
 		    NULL);
 	} else {
-		KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);
-		dw->work.w_state = WORK_IDLE;
+		KASSERT(linux_work_state(&dw->work) == WORK_DELAYED_CANCELLED);
+		linux_work_transition(&dw->work, WORK_IDLE);
 		dw->work.w_wq = NULL;
 		cv_broadcast(&wq->wq_cv);
 	}
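
The change above is mechanical: every read of w_state now goes through linux_work_state() and every write through linux_work_transition(), so the KASSERT(linux_work_locked(work)) check fires at each access instead of relying on call sites to remember it. The /* XXX enum */ marks that the accessor would ideally return the state enumeration rather than int. A minimal standalone sketch of the same pattern, using hypothetical toy_* names and plain assert(3) in place of KASSERT and the w_lock simplelock (not the kernel code itself):

	#include <assert.h>
	#include <stdbool.h>

	enum toy_work_state { TOY_IDLE, TOY_PENDING, TOY_INVOKED };

	struct toy_work {
		bool			tw_locked;	/* stands in for w_lock */
		enum toy_work_state	tw_state;
	};

	/* All reads of tw_state funnel through one lock-asserting accessor. */
	static enum toy_work_state
	toy_work_state(const struct toy_work *work)
	{

		assert(work->tw_locked);
		return work->tw_state;
	}

	/* All writes funnel through one lock-asserting mutator. */
	static void
	toy_work_transition(struct toy_work *work, enum toy_work_state state)
	{

		assert(work->tw_locked);
		work->tw_state = state;
	}

	int
	main(void)
	{
		struct toy_work work = { .tw_locked = false, .tw_state = TOY_IDLE };

		work.tw_locked = true;		/* linux_work_lock() in the patch */
		if (toy_work_state(&work) == TOY_IDLE)
			toy_work_transition(&work, TOY_PENDING);
		work.tw_locked = false;		/* linux_work_unlock() */
		return 0;
	}

The payoff is the same in both versions: a w_state access outside the lock becomes a deterministic assertion failure at the point of the bug, rather than a silent race, and there is exactly one place to change if the state representation is later switched to a named enum.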