synch: minor mutex lock checking optimization

This saves 23 bytes of instructions on amd64, which by itself is not worth it,
but we will need the out label for recursive lock policies anyway.

Signed-off-by: Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
1 file changed, 5 insertions(+), 0 deletions(-)

M synch.c +5 -0
@@ -503,6 +503,10 @@ static void check_unheld_for_lock(struct
 	if (!atomic_read(&lockdep_on))
 		return;
 
+	held = last_acquired_lock();
+	if (!held)
+		goto out; /* nothing held, nothing to check */
+
 	/* check for deadlocks & recursive locking */
 	for_each_held_lock(i, held) {
 		if ((held->info != info) && (held->info->lc != info->lc))
@@ -516,6 +520,7 @@ static void check_unheld_for_lock(struct
 	if (check_circular_deps(info, where))
 		return;
 
+out:
 	held = held_stack_alloc();
 	if (!held) {
 		error_alloc(info, where, "lock nesting limit reached");
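
As a rough illustration of the resulting control flow (not part of the patch):
the standalone sketch below shows the early exit when nothing is held and how
a future recursive-lock policy could reuse the out label. struct held_lock,
held_list, and the printf/fprintf bookkeeping are simplified stand-ins for the
real synch.c internals, not its actual API.

#include <stdio.h>

struct lock_info {
	int id;
};

struct held_lock {
	struct lock_info *info;
	struct held_lock *next;
};

/* stand-in for the per-thread held-lock stack in synch.c */
static struct held_lock *held_list;

static struct held_lock *last_acquired_lock(void)
{
	return held_list;		/* NULL when nothing is held */
}

static void check_unheld_for_lock(struct lock_info *info)
{
	struct held_lock *held;

	held = last_acquired_lock();
	if (!held)
		goto out;		/* nothing held, nothing to check */

	/* the deadlock & recursive-locking checks run only on this path */
	for (held = held_list; held; held = held->next) {
		if (held->info == info) {
			fprintf(stderr, "recursive acquire of lock %d\n",
				info->id);
			return;
		}
	}

out:
	/* record the new lock; a recursive-lock policy could jump here too */
	printf("acquiring lock %d\n", info->id);
}

int main(void)
{
	struct lock_info a = { 1 };

	check_unheld_for_lock(&a);	/* fast path: nothing held yet */
	return 0;
}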