 	struct {
 		raw_spinlock_t lock;
 		struct io_wq_work_list work_list;
-		unsigned long hash_map;
 		unsigned flags;
 	} ____cacheline_aligned_in_smp;
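The per-node bitmap leaves the wqe-local section that wqe->lock protects. Until now every node tracked busy hash buckets privately, so two workers on different NUMA nodes could run work for the same bucket at the same time. The replacement goes into struct io_wq, shared by every node: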
 	struct task_struct *manager;
 	struct user_struct *user;
+
+	unsigned long *hash_map;
+
 	refcount_t refs;
 	struct completion done;
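Note that io_wq only keeps a pointer; the storage is owned by the creator (the io_uring context, further down). Since the shared map is no longer covered by any single wqe->lock, it must be updated with atomic bitops instead of plain read-modify-write, which is what the next two hunks do. Here is a standalone userspace sketch of that claim/release pattern, using C11 atomics rather than the kernel's test_and_set_bit()/clear_bit(), illustrative only:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static _Atomic unsigned long hash_map;	/* one bit per hash bucket */

	/* Returns true if we won the bucket: atomic_fetch_or() hands back
	 * the old word, so exactly one of any number of racing claimants
	 * sees the bit still clear. */
	static bool claim_bucket(unsigned int hash)
	{
		unsigned long old = atomic_fetch_or(&hash_map, 1UL << hash);

		return !(old & (1UL << hash));
	}

	static void release_bucket(unsigned int hash)
	{
		atomic_fetch_and(&hash_map, ~(1UL << hash));
	}

	int main(void)
	{
		printf("%d\n", claim_bucket(3));	/* 1: won the bucket */
		printf("%d\n", claim_bucket(3));	/* 0: already taken */
		release_bucket(3);
		printf("%d\n", claim_bucket(3));	/* 1: free again */
		return 0;
	}

The invariant this buys is that at most one worker in the whole io_wq runs a given bucket, which is exactly what the old per-wqe mask could not guarantee across nodes.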
 		/* hashed, can run if not already running */
 		hash = io_get_work_hash(work);
-		if (!(wqe->hash_map & BIT(hash))) {
-			wqe->hash_map |= BIT(hash);
+		if (!test_and_set_bit(hash, wqe->wq->hash_map)) {
 			/* all items with this hash lie in [work, tail] */
 			tail = wqe->hash_tail[hash];
 			wqe->hash_tail[hash] = NULL;
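That is the claim side, in io_get_next_work(): test_and_set_bit() sets the bit and returns its previous value in one atomic step, so at most one worker in the whole io_wq, not just on this wqe, wins the bucket; a loser leaves the work queued. For context on where the hash value comes from, hashed work carries its bucket number in the upper bits of work->flags, roughly as in fs/io-wq.[ch] of this era (abbreviated, not part of the patch):

	static inline void io_wq_hash_work(struct io_wq_work *work, void *val)
	{
		unsigned int bit = hash_ptr(val, IO_WQ_HASH_ORDER);

		work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
	}

	static inline unsigned int io_get_work_hash(struct io_wq_work *work)
	{
		return work->flags >> IO_WQ_HASH_SHIFT;
	}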
 			if (hash != -1U && !next_hashed) {
 				raw_spin_lock_irq(&wqe->lock);
-				wqe->hash_map &= ~BIT_ULL(hash);
+				clear_bit(hash, wq->hash_map);
 				wqe->flags &= ~IO_WQE_FLAG_STALLED;
 				/* skip unnecessary unlock-lock wqe->lock */
 				if (!work)
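And this is the release side, in io_worker_handle_work(), where wq is the function's local wqe->wq. clear_bit() is atomic, so a worker on another node can claim the freed bucket without ever touching this wqe->lock; the lock is taken here only for the wqe-local flags and work_list state. The old code was also inconsistent about BIT() versus BIT_ULL(); the bitops interface removes that wart.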
 	if (ret)
 		goto err_wqes;
+	wq->hash_map = data->hash_map;
 	wq->free_work = data->free_work;
 	wq->do_work = data->do_work;
 		unsigned		cached_cq_overflow;
 		unsigned long		sq_check_overflow;
+		/* hashed buffered write serialization */
+		unsigned long		hash_map;
+
 		struct list_head	defer_list;
 		struct list_head	timeout_list;
 		struct list_head	cq_overflow_list;
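The storage itself is a plain unsigned long in struct io_ring_ctx, so serialization now scopes to the ring rather than to one io-wq node, and the map holds at most BITS_PER_LONG buckets. "Hashed buffered write" refers to io_uring hashing buffered writes to regular files by inode, so writes to the same file stay ordered while writes to different files run concurrently; the call site is roughly the following (condensed from io_prep_async_work() of this era, not part of this patch):

	if (req->flags & REQ_F_ISREG) {
		/* same inode => same bucket => serialized in io-wq */
		if (def->hash_reg_file || (req->ctx->flags & IORING_SETUP_IOPOLL))
			io_wq_hash_work(&req->work, file_inode(req->file));
	}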
 	unsigned int concurrency;
 	data.user = ctx->user;
+	data.hash_map = &ctx->hash_map;
 	data.free_work = io_free_work;
 	data.do_work = io_wq_submit_work;
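Finally the glue: the context hands io-wq a pointer to its map through io_wq_data, matching the data->hash_map read in io_wq_create() above. The io_wq_data change itself is not in this excerpt; the implied shape is roughly this, with the hash_map member inferred and its placement illustrative:

	struct io_wq_data {
		struct user_struct *user;
		unsigned long *hash_map;	/* ctx-owned hashed-work bitmap */
		io_wq_work_fn *do_work;
		free_work_fn *free_work;
	};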