author    Jens Axboe <axboe@kernel.dk>    2012-05-23 12:16:30 +0200
committer Jens Axboe <axboe@kernel.dk>    2012-05-23 12:16:30 +0200
commit    266307e19baf80c3726afc0e17ae5181da007c1e (patch)
tree      867f9254e069c3c498b990a26354170c534264ed
parent    21731057017b1f52b6b907abc7127c0257a0166f (diff)
parent    ff26eaadf4d914e397872b99885d45756104e9ae (diff)

Merge branch 'for-3.5/core' into for-next

-rw-r--r--  block/blk-throttle.c  10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 14dedecfc7e8..5b0659512047 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -219,6 +219,7 @@ alloc_stats:
 static void throtl_pd_init(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
+	unsigned long flags;
 
 	RB_CLEAR_NODE(&tg->rb_node);
 	bio_list_init(&tg->bio_lists[0]);
@@ -235,19 +236,20 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	 * but percpu allocator can't be called from IO path. Queue tg on
 	 * tg_stats_alloc_list and allocate from work item.
 	 */
-	spin_lock(&tg_stats_alloc_lock);
+	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
 	queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
-	spin_unlock(&tg_stats_alloc_lock);
+	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
 
 static void throtl_pd_exit(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
+	unsigned long flags;
 
-	spin_lock(&tg_stats_alloc_lock);
+	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 	list_del_init(&tg->stats_alloc_node);
-	spin_unlock(&tg_stats_alloc_lock);
+	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 
 	free_percpu(tg->stats_cpu);
 }
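
For context, the hunks above switch tg_stats_alloc_lock to the irqsave/irqrestore locking variants in the two blkcg policy callbacks, presumably because the lock is also held from contexts that run with interrupts disabled, and they preserve the pattern described in the quoted comment: the percpu stats cannot be allocated from the IO path, so the group is queued on tg_stats_alloc_list and the allocation happens later in a work item. The sketch below is a minimal, self-contained illustration of that combined pattern, not the kernel code itself; every example_* identifier is invented, and system_wq stands in for the system_nrt_wq used in the patch.

/*
 * Hedged sketch of the pattern above. All example_* names are hypothetical;
 * only the locking and workqueue APIs are real kernel interfaces.
 */
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct example_item {
	struct list_head	alloc_node;	/* link on example_pending */
	u64 __percpu		*stats;		/* allocated from the worker */
};

/*
 * example_lock is also taken from contexts that already run with IRQs
 * disabled, so every acquisition below uses an IRQ-safe variant; mixing
 * in plain spin_lock() would leave the lock's IRQ-safety inconsistent
 * and trip lockdep.
 */
static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_pending);
static void example_worker_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_worker, example_worker_fn);

/*
 * May be called with interrupts already disabled (e.g. nested inside
 * another IRQ-disabled lock), hence spin_lock_irqsave(): it records the
 * current IRQ state and restores it on unlock instead of unconditionally
 * re-enabling interrupts the way spin_unlock_irq() would.
 */
static void example_init(struct example_item *item)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	list_add(&item->alloc_node, &example_pending);
	/* defer the sleeping allocation to process context */
	queue_delayed_work(system_wq, &example_worker, 0);
	spin_unlock_irqrestore(&example_lock, flags);
}

static void example_exit(struct example_item *item)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	list_del_init(&item->alloc_node);
	spin_unlock_irqrestore(&example_lock, flags);

	free_percpu(item->stats);
}

/* Runs in process context, so an allocation that may sleep is fine here. */
static void example_worker_fn(struct work_struct *work)
{
	spin_lock_irq(&example_lock);
	while (!list_empty(&example_pending)) {
		struct example_item *item =
			list_first_entry(&example_pending,
					 struct example_item, alloc_node);

		list_del_init(&item->alloc_node);
		spin_unlock_irq(&example_lock);

		/* the part that could not run in the original caller */
		item->stats = alloc_percpu(u64);

		spin_lock_irq(&example_lock);
	}
	spin_unlock_irq(&example_lock);
}

The sketch glosses over object lifetime (the item could go away while the worker has dropped the lock), which real code must guarantee separately, but the locking shape matches the patch: IRQ-saving acquisitions on the init/exit paths and a work item that drains the pending list in process context.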