locking: Implement new raw_spinlock
[linux-block.git] / kernel/sched.c
index fd05861b2111005a5a88b386d7d77ee036f5e160..e6acf2d7b753a3b6cb44e7ea9d2bd972d236a438 100644
@@ -884,7 +884,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
        /* this is a valid case when another task releases the spinlock */
-       rq->lock.owner = current;
+       rq->lock.rlock.owner = current;
 #endif
        /*
         * If we are tracking spinlock dependencies then we have to
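For context, a minimal standalone sketch of the layering this conversion introduces: spinlock_t now wraps an inner raw_spinlock, and the debug owner field lives in that inner lock, which is why the assignment above gains the extra .rlock step. This is illustrative only; the real, config-dependent definitions live in include/linux/spinlock_types.h, and the field names beyond rlock/owner plus the set_rq_lock_owner() helper are hypothetical simplifications, not kernel code.

    /*
     * Simplified sketch of the spinlock_t / raw_spinlock layering
     * (illustrative; not the kernel's exact definitions).
     */
    #define CONFIG_DEBUG_SPINLOCK 1

    typedef struct {
            volatile unsigned int slock;    /* stand-in for the arch-level lock word */
    } arch_spinlock_t;

    typedef struct raw_spinlock {
            arch_spinlock_t raw_lock;
    #ifdef CONFIG_DEBUG_SPINLOCK
            void *owner;                    /* debug: task believed to hold this lock */
    #endif
    } raw_spinlock_t;

    typedef struct spinlock {
            struct raw_spinlock rlock;      /* spinlock_t now just embeds the raw lock */
    } spinlock_t;

    struct task_struct;                     /* opaque here */

    struct rq {
            spinlock_t lock;                /* runqueue lock, still a spinlock_t at this stage */
    };

    /* The debug owner field is now one level deeper, hence the change in the hunk above. */
    static inline void set_rq_lock_owner(struct rq *rq, struct task_struct *curr)
    {
            rq->lock.rlock.owner = curr;
    }

Under this layering, any code that previously touched lock.owner directly has to reach through the rlock member instead, which is exactly the one-line adjustment made to finish_lock_switch() here.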