struct pref {
	struct percpu	*pref_percpu; /* struct pref_cpu */
	kmutex_t	pref_lock;
	kcondvar_t	pref_cv;
	int		pref_count;
	bool		pref_draining;
};

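/*
 * Per-CPU state.  The true number of users is the sum of
 * prc_count over all CPUs.  An individual CPU's count may be
 * negative, if users acquire references on one CPU and release
 * them on another.
 */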
struct pref_cpu {
	int		prc_count;
};

int
pref_init(struct pref *pref)
{

	pref->pref_percpu = percpu_alloc(sizeof(struct pref_cpu));
	if (pref->pref_percpu == NULL)
		return ENOMEM;

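	/*
	 * The per-CPU storage is zero-initialized, so each CPU's
	 * prc_count starts at zero; only the global state needs
	 * explicit initialization.
	 */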
	/*
	 * pref_release() takes pref_lock while at splsoftserial, so
	 * it must be a spin mutex at IPL_SOFTSERIAL.
	 */
	mutex_init(&pref->pref_lock, MUTEX_DEFAULT, IPL_SOFTSERIAL);
	cv_init(&pref->pref_cv, "pref");
	pref->pref_count = 0;
	pref->pref_draining = false;

	return 0;
}

void
pref_destroy(struct pref *pref)
{

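	/* The caller must have completed pref_drain(): no users remain.  */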
	KASSERT(pref->pref_draining);
	cv_destroy(&pref->pref_cv);
	mutex_destroy(&pref->pref_lock);
	percpu_free(pref->pref_percpu, sizeof(struct pref_cpu));
}

int
pref_acquire(struct pref *pref)
{
	struct pref_cpu *prc;
	int s, error;

	/* Block xcalls and acquire the per-CPU counter.  */
	s = splsoftserial();
	prc = percpu_getref(pref->pref_percpu);

	/* Is this object going away?  */
	if (pref->pref_draining) {
		/* Yes: fail with ENOENT.  */
		error = ENOENT;
	} else {
		/* No: count one more user on this CPU.  */
		KASSERT(prc->prc_count < INT_MAX);
		prc->prc_count++;
		error = 0;
	}

	percpu_putref(pref->pref_percpu);
	splx(s);

	return error;
}
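
/*
 * Example use (a sketch, not part of the interface above): a
 * hypothetical consumer, frobnicate(), holds a reference across
 * its use of whatever object pref protects.
 */
int
frobnicate(struct pref *pref)
{
	int error;

	/* Try to become a user; this fails once draining starts.  */
	error = pref_acquire(pref);
	if (error)
		return error;

	/* ... use the object guarded by pref ... */

	/* Done: drop the reference, possibly on a different CPU.  */
	pref_release(pref);

	return 0;
}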

void
pref_release(struct pref *pref)
{
	struct pref_cpu *prc;
	int s;

	/* Block xcalls and acquire the per-CPU counter.  */
	s = splsoftserial();
	prc = percpu_getref(pref->pref_percpu);

	/*
	 * The count of users on this CPU may legitimately be
	 * negative: every user who released on this CPU may have
	 * migrated here from another one.  All we can assert is
	 * that the decrement will not underflow.
	 */
	KASSERT(INT_MIN < prc->prc_count);

	/* Decrement the count of users on this CPU.  */
	prc->prc_count--;

	/* Is anyone waiting for users to drain?  */
	if (pref->pref_draining) {
		/*
		 * Yes: under the global lock, decrement the global
		 * count, and if it went to zero, notify whoever is
		 * waiting for users to drain.
		 */
		mutex_enter(&pref->pref_lock);
		if (--pref->pref_count == 0)
			cv_broadcast(&pref->pref_cv);
		mutex_exit(&pref->pref_lock);
	}

	percpu_putref(pref->pref_percpu);
	splx(s);
}

static void
pref_drain_xc(void *cookie0, void *cookie1 __unused)
{
	struct pref *pref = cookie0;
	struct pref_cpu *prc;
	int s;

	/*
	 * Lock the global counter, block xcalls, and acquire the
	 * per-CPU counter.
	 */
	mutex_enter(&pref->pref_lock);
	s = splsoftserial();
	prc = percpu_getref(pref->pref_percpu);

	/* Count this CPU's users in the global count of users.  */
	pref->pref_count += prc->prc_count;

	percpu_putref(pref->pref_percpu);
	splx(s);
	mutex_exit(&pref->pref_lock);
}

void
pref_drain(struct pref *pref)
{

	KASSERT(!pref->pref_draining);

	/*
	 * Mark it draining:
	 *
	 * - New users will not use it.
	 * - Existing users will notify us when they're done.
	 *
	 * The store is unlocked, but the xcall below excludes, on
	 * each CPU, the acquire and release paths that examine the
	 * flag, so once it has completed every CPU sees it set.
	 */
	pref->pref_draining = true;

	/*
	 * Start counting, in pref->pref_count, all the users in the
	 * whole system.  During the xcall, pref->pref_count may have
	 * any sign, if a user acquired a reference on one CPU and
	 * released it on another.  Once this xcall is complete,
	 * pref->pref_count must be nonnegative, because there can
	 * never be a negative number of users on the whole system.
	 */
	xc_wait(xc_broadcast(0, &pref_drain_xc, pref, NULL));

	/*
	 * Wait for the global count of users to drain to zero.
	 */
	mutex_enter(&pref->pref_lock);
	KASSERT(0 <= pref->pref_count);
	while (0 < pref->pref_count)
		cv_wait(&pref->pref_cv, &pref->pref_lock);
	mutex_exit(&pref->pref_lock);
}
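
/*
 * Example teardown (a sketch): drain before destroy.  Once
 * pref_drain() returns, there are no users left and pref_acquire()
 * will fail with ENOENT, so it is safe to tear down the state and
 * free whatever object pref protected.
 */
void
example_teardown(struct pref *pref)
{

	pref_drain(pref);
	pref_destroy(pref);
	/* ... free the protected object here ... */
}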