
C++ rwlock2rw Function Code Examples


This article compiles typical usage examples of the rwlock2rw function in C++. If you have been wondering what rwlock2rw does, how to call it, or what real call sites look like, the hand-picked code examples below should help.



A total of 14 rwlock2rw code examples are shown below, all taken from FreeBSD's kern_rwlock.c and sorted by popularity by default.
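For context, rwlock2rw itself is a small helper inside FreeBSD's kern_rwlock.c: the public rw_*() macros hand these functions the address of a lock's rw_lock word (a "cookie"), and rwlock2rw converts that cookie back into a pointer to the enclosing struct rwlock. The stand-alone sketch below illustrates that containerof-style conversion with simplified stand-ins for the kernel structures; it is an illustration of the idea, not the kernel's exact definition.

#include <stddef.h>	/* offsetof */
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures (illustration only). */
struct lock_object {
	const char	*lo_name;
};

struct rwlock {
	struct lock_object	lock_object;
	volatile uintptr_t	rw_lock;	/* the "cookie" the _rw_* functions receive */
};

/*
 * Recover the enclosing struct rwlock from the address of its rw_lock
 * member, the same kind of conversion rwlock2rw performs.
 */
static struct rwlock *
rwlock2rw(volatile uintptr_t *c)
{
	return ((struct rwlock *)((uintptr_t)c -
	    offsetof(struct rwlock, rw_lock)));
}

int
main(void)
{
	struct rwlock rw = { { "demo lock" }, 0 };
	volatile uintptr_t *cookie = &rw.rw_lock;

	printf("cookie %p -> rwlock %p (%s)\n", (void *)(uintptr_t)cookie,
	    (void *)rwlock2rw(cookie), rwlock2rw(cookie)->lock_object.lo_name);
	return (0);
}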

Example 1: _rw_init_flags

void
_rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
{
	struct rwlock *rw;
	int flags;

	rw = rwlock2rw(c);

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE | RW_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
	    &rw->rw_lock));

	flags = LO_UPGRADABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;
	if (opts & RW_NEW)
		flags |= LO_NEW;

	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
}
Developer ID: jmgurney | Project: freebsd | Lines of code: 32 | Source file: kern_rwlock.c
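In consumer code, _rw_init_flags() is normally reached through the rw_init()/rw_init_flags() wrapper macros from <sys/rwlock.h>, which pass &rw->rw_lock as the cookie. A minimal, hypothetical sketch of such a consumer (the foo_softc structure, names, and chosen flags are invented for illustration):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

/* Hypothetical driver state guarded by an rwlock. */
struct foo_softc {
	struct rwlock	sc_lock;
	int		sc_value;
};

static void
foo_attach(struct foo_softc *sc)
{
	/* The wrapper passes &sc->sc_lock.rw_lock to _rw_init_flags(). */
	rw_init_flags(&sc->sc_lock, "foo softc lock", RW_RECURSE);
	sc->sc_value = 0;
}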


Example 2: __rw_try_rlock

int
__rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));

	for (;;) {
		x = rw->rw_lock;
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			curthread->td_locks++;
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}
Developer ID: ornarium | Project: freebsd | Lines of code: 34 | Source file: kern_rwlock.c
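Consumers reach __rw_try_rlock() through the rw_try_rlock(9) macro. A hypothetical sketch of the usual non-blocking read pattern, reusing the invented foo_softc from the sketch after Example 1:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

struct foo_softc { struct rwlock sc_lock; int sc_value; };	/* as in the sketch after Example 1 */

/* Returns 1 and stores the current value if the read lock was free, 0 otherwise. */
static int
foo_peek(struct foo_softc *sc, int *out)
{
	if (!rw_try_rlock(&sc->sc_lock))
		return (0);		/* a writer is active; do not block */
	*out = sc->sc_value;
	rw_runlock(&sc->sc_lock);
	return (1);
}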


Example 3: __rw_try_wlock

int
__rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	if (rw_wlocked(rw) &&
	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		rw->rw_recurse++;
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
		    (uintptr_t)curthread);

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
	}
	return (rval);
}
Developer ID: ornarium | Project: freebsd | Lines of code: 33 | Source file: kern_rwlock.c
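The write-side counterpart is rw_try_wlock(9). A hypothetical sketch, again using the invented foo_softc:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

struct foo_softc { struct rwlock sc_lock; int sc_value; };	/* as in the sketch after Example 1 */

/* Returns 1 if the update was applied without blocking, 0 otherwise. */
static int
foo_try_update(struct foo_softc *sc, int v)
{
	if (!rw_try_wlock(&sc->sc_lock))
		return (0);		/* readers or another writer hold the lock */
	sc->sc_value = v;
	rw_wunlock(&sc->sc_lock);
	return (1);
}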


Example 4: _rw_destroy

void
_rw_destroy(volatile uintptr_t *c)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
	KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}
Developer ID: jmgurney | Project: freebsd | Lines of code: 12 | Source file: kern_rwlock.c
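Matching the initialization sketch after Example 1, teardown goes through the rw_destroy(9) wrapper; the KASSERTs above require the lock to be unlocked and not recursed at that point. A hypothetical sketch:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

struct foo_softc { struct rwlock sc_lock; int sc_value; };	/* as in the sketch after Example 1 */

static void
foo_detach(struct foo_softc *sc)
{
	/* Must be called with sc_lock unlocked and not recursed. */
	rw_destroy(&sc->sc_lock);
}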


Example 5: _rw_wunlock_cookie

void
_rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);
	__rw_wunlock(rw, curthread, file, line);
	curthread->td_locks--;
}
Developer ID: jmgurney | Project: freebsd | Lines of code: 19 | Source file: kern_rwlock.c


Example 6: _rw_wlock_cookie

void
_rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}
Developer ID: jmgurney | Project: freebsd | Lines of code: 22 | Source file: kern_rwlock.c
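Examples 5 and 6 are the two halves of the blocking writer path, reached through the rw_wlock(9)/rw_wunlock(9) wrappers. A hypothetical sketch of the usual pairing:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

struct foo_softc { struct rwlock sc_lock; int sc_value; };	/* as in the sketch after Example 1 */

static void
foo_set(struct foo_softc *sc, int v)
{
	rw_wlock(&sc->sc_lock);		/* blocks until all readers and writers drain */
	sc->sc_value = v;
	rw_wunlock(&sc->sc_lock);
}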


Example 7: __rw_wunlock_hard

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
    int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	if (rw_wlocked(rw) && rw_recursed(rw)) {
		rw->rw_recurse--;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);

	/*
	 * Use the same algo as sx locks for now.  Prefer waking up shared
	 * waiters if we have any over writers.  This is probably not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wakeup the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
	} else
		queue = TS_SHARED_QUEUE;

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}
Developer ID: jmgurney | Project: freebsd | Lines of code: 68 | Source file: kern_rwlock.c


Example 8: __rw_wlock_hard

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
__rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
    int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
	uintptr_t v, x;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	if (rw_wlocked(rw)) {
		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
		return;
	}

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&rw->lock_object);
	state = rw->rw_lock;
#endif
	while (!_rw_write_lock(rw, tid)) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		v = rw->rw_lock;
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner)) {
				cpu_spinwait();
#ifdef KDTRACE_HOOKS
				spin_cnt++;
#endif
			}
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			continue;
		}
		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
		    spintries < rowner_retries) {
			if (!(v & RW_LOCK_WRITE_SPINNER)) {
				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
				    v | RW_LOCK_WRITE_SPINNER)) {
					continue;
				}
			}
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i++) {
//......... remainder of this function omitted .........
Developer ID: jmgurney | Project: freebsd | Lines of code: 101 | Source file: kern_rwlock.c


Example 9: _rw_runlock_cookie

void
_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t x, v, queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WAITERS)) {
			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}
		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in a unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wakeup actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wakeup all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		x = RW_UNLOCKED;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			x |= (v & RW_LOCK_READ_WAITERS);
		} else
			queue = TS_SHARED_QUEUE;
		if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
		    x)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
//......... remainder of this function omitted .........
Developer ID: jmgurney | Project: freebsd | Lines of code: 101 | Source file: kern_rwlock.c


Example 10: __rw_rlock

void
__rw_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t v;
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("rw_rlock: wlock already held for %s @ %s:%d",
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&rw->lock_object);
	state = rw->rw_lock;
#endif
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if the lock has been unlocked and write waiters
			 * were present.
			 */
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
			    v + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)v,
					    (void *)(v + RW_ONE_READER));
				break;
			}
			continue;
		}
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", rw->lock_object.lo_name);
				while ((struct thread*)RW_OWNER(rw->rw_lock) ==
				    owner && TD_IS_RUNNING(owner)) {
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
//......... remainder of this function omitted .........
Developer ID: jmgurney | Project: freebsd | Lines of code: 101 | Source file: kern_rwlock.c


Example 11: _rw_wowned

int
_rw_wowned(const volatile uintptr_t *c)
{

	return (rw_wowner(rwlock2rw(c)) == curthread);
}
Developer ID: jmgurney | Project: freebsd | Lines of code: 6 | Source file: kern_rwlock.c
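rw_wowned(9) is typically used to assert that the caller already holds the write lock. A hypothetical sketch (KASSERT comes from <sys/systm.h>; foo_softc is the invented structure from the sketch after Example 1):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

struct foo_softc { struct rwlock sc_lock; int sc_value; };	/* as in the sketch after Example 1 */

static void
foo_set_locked(struct foo_softc *sc, int v)
{
	KASSERT(rw_wowned(&sc->sc_lock),
	    ("foo_set_locked: sc_lock not write-held"));
	sc->sc_value = v;
}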


Example 12: __rw_assert

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
__rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct rwlock *rw;

	if (panicstr != NULL)
		return;

	rw = rwlock2rw(c);

	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);

		if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
			if (rw_recursed(rw)) {
				if (what & RA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    rw->lock_object.lo_name, file,
					    line);
			} else if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		}
#endif
		break;
	case RA_WLOCKED:
	case RA_WLOCKED | RA_RECURSED:
	case RA_WLOCKED | RA_NOTRECURSED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		if (rw_recursed(rw)) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
Developer ID: jmgurney | Project: freebsd | Lines of code: 81 | Source file: kern_rwlock.c
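Consumers normally invoke this through the rw_assert(9) macro to document and enforce a locking contract at function entry; the check is effective only in kernels built with the relevant debugging options. A hypothetical sketch:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

struct foo_softc { struct rwlock sc_lock; int sc_value; };	/* as in the sketch after Example 1 */

static int
foo_get_locked(struct foo_softc *sc)
{
	rw_assert(&sc->sc_lock, RA_LOCKED);	/* caller holds a read or write lock */
	return (sc->sc_value);
}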


Example 13: __rw_downgrade

/*
 * Downgrade a write lock into a single read lock.
 */
void
__rw_downgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t tid, v;
	int rwait, wwait;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile and "disown" the lock.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock & RW_LOCK_WAITERS;
	rwait = v & RW_LOCK_READ_WAITERS;
	wwait = v & RW_LOCK_WRITE_WAITERS;
	MPASS(rwait | wwait);

	/*
	 * Downgrade from a write lock while preserving waiters flag
	 * and give up ownership of the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (!wwait)
		v &= ~RW_LOCK_READ_WAITERS;
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
	/*
	 * Wake other readers if there are no writers pending.  Otherwise they
	 * won't be able to acquire the lock anyway.
	 */
	if (rwait && !wwait) {
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	} else
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	curthread->td_rw_rlocks++;
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(rw__downgrade, rw);
}
Developer ID: jmgurney | Project: freebsd | Lines of code: 69 | Source file: kern_rwlock.c
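A common reason to downgrade is to finish a modification and then keep reading without shutting other readers out. A hypothetical sketch using the rw_downgrade(9) wrapper:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

struct foo_softc { struct rwlock sc_lock; int sc_value; };	/* as in the sketch after Example 1 */

static int
foo_replace(struct foo_softc *sc, int v)
{
	int old;

	rw_wlock(&sc->sc_lock);
	old = sc->sc_value;
	sc->sc_value = v;
	rw_downgrade(&sc->sc_lock);	/* now a single read lock; other readers may enter */
	/* ... longer read-mostly work under the read lock ... */
	rw_runlock(&sc->sc_lock);
	return (old);
}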


Example 14: __rw_try_upgrade

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t v, x, tid;
	struct turnstile *ts;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	success = 0;
	for (;;) {
		v = rw->rw_lock;
		if (RW_READERS(v) > 1)
			break;
		if (!(v & RW_LOCK_WAITERS)) {
			success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
			if (!success)
				continue;
			break;
		}

		/*
		 * Ok, we think we have waiters, so lock the turnstile.
		 */
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;
		if (RW_READERS(v) > 1) {
			turnstile_cancel(ts);
			break;
		}
		/*
		 * Try to switch from one reader to a writer again.  This time
		 * we honor the current state of the waiters flags.
		 * If we obtain the lock with the flags set, then claim
		 * ownership of the turnstile.
		 */
		x = rw->rw_lock & RW_LOCK_WAITERS;
		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
		if (success) {
			if (x)
				turnstile_claim(ts);
			else
				turnstile_cancel(ts);
			break;
		}
		turnstile_cancel(ts);
	}
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_rw_rlocks--;
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(rw__upgrade, rw);
	}
	return (success);
}
Developer ID: jmgurney | Project: freebsd | Lines of code: 77 | Source file: kern_rwlock.c
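The consumer-facing form is rw_try_upgrade(9); the usual pattern is an optimistic read that upgrades only when a change turns out to be needed, and falls back to the read path if the upgrade fails. A hypothetical sketch:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

struct foo_softc { struct rwlock sc_lock; int sc_value; };	/* as in the sketch after Example 1 */

static void
foo_init_if_unset(struct foo_softc *sc, int v)
{
	rw_rlock(&sc->sc_lock);
	if (sc->sc_value == 0 && rw_try_upgrade(&sc->sc_lock)) {
		sc->sc_value = v;	/* upgrade succeeded: we are now the writer */
		rw_wunlock(&sc->sc_lock);
		return;
	}
	rw_runlock(&sc->sc_lock);	/* upgrade not attempted or failed: still read-locked */
}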



Note: The rwlock2rw examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use should follow each project's license. Do not reproduce this article without permission.

