return NULL;
xa_init(&ctx->io_bl_xa);
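+ /* rsrc nodes waiting for their final put, see io_release_rsrc_node() */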
+ xa_init(&ctx->rsrc_free);
/*
* Use 5 bits less than the max cq entries, that should give us around
io_futex_cache_free(ctx);
kvfree(ctx->cancel_table.hbs);
xa_destroy(&ctx->io_bl_xa);
+ xa_destroy(&ctx->rsrc_free);
kfree(ctx);
return NULL;
}
node = req->comp_list.next;
io_req_add_to_cache(req, ctx);
} while (node);
+
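+ /* Reap any stashed rsrc nodes whose refs have dropped to zero */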
+ io_reap_rsrc_nodes(ctx);
}
void __io_submit_flush_completions(struct io_ring_ctx *ctx)
io_futex_cache_free(ctx);
io_destroy_buffers(ctx);
io_unregister_cqwait_reg(ctx);
+ io_reap_rsrc_nodes(ctx);
mutex_unlock(&ctx->uring_lock);
if (ctx->sq_creds)
put_cred(ctx->sq_creds);
io_napi_free(ctx);
kvfree(ctx->cancel_table.hbs);
xa_destroy(&ctx->io_bl_xa);
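+ /* All stashed nodes must have been reaped by the time the ring goes away */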
+ WARN_ON_ONCE(!xa_empty(&ctx->rsrc_free));
+ WARN_ON_ONCE(ctx->rsrc_free_nr);
+ xa_destroy(&ctx->rsrc_free);
kfree(ctx);
}
}
}
+void __io_reap_rsrc_nodes(struct io_ring_ctx *ctx)
+{
+ struct io_rsrc_node *node;
+ unsigned long index;
+
+ /* Find and reap nodes that hit zero refs */
+ xa_for_each(&ctx->rsrc_free, index, node) {
+ if (!node->refs)
+ io_free_rsrc_node(node);
+ }
+}
+
+void io_release_rsrc_node(struct io_rsrc_node *node)
+{
+ struct io_ring_ctx *ctx = io_rsrc_node_ctx(node);
+ unsigned long index = (unsigned long) node;
+
+ /*
+ * If the ref dropped to zero, nobody else holds a reference and the
+ * node can be freed right away. If it's still non-zero, someone else
+ * holds a reference and will put it at some point. The put side is
+ * the fast path and doesn't check for the ref hitting zero there - a
+ * zero ref simply means nobody else holds the node, and nobody else
+ * can find it either, as it was removed from the lookup table before
+ * the release. A ref dropping to zero as part of a fast path put can
+ * only happen after the unregister side has stored the node in
+ * ctx->rsrc_free.
+ */
+ if (!--node->refs) {
+ io_free_rsrc_node(node);
+ return;
+ }
+
+ /*
+ * If the free list already has an entry for this node, then it was
+ * already stashed for cleanup. Just let the normal cleanup reap it.
+ */
+ if (xa_load(&ctx->rsrc_free, index))
+ return;
+
+ /* Slot was reserved when the node was allocated, the store cannot fail */
+ ctx->rsrc_free_nr++;
+ WARN_ON_ONCE(xa_store(&ctx->rsrc_free, index, node, GFP_NOWAIT));
+}
+
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx, int type)
{
struct io_rsrc_node *node;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (node) {
- node->ctx_ptr = (unsigned long) ctx | type;
- node->refs = 1;
+ unsigned long index = (unsigned long) node;
+
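+ /*
+ * Reserve a slot in ctx->rsrc_free up front, so that stashing the
+ * node at release time cannot fail with GFP_NOWAIT.
+ */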
+ if (!xa_reserve(&ctx->rsrc_free, index, GFP_KERNEL)) {
+ node->ctx_ptr = (unsigned long) ctx | type;
+ node->refs = 1;
+ return node;
+ }
+ kfree(node);
+ node = NULL;
}
return node;
}
{
if (!data->nr)
return;
- while (data->nr--) {
- if (data->nodes[data->nr])
- io_put_rsrc_node(data->nodes[data->nr]);
- }
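+ /* Nodes are either freed here or stashed for later reaping */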
+ while (data->nr--)
+ io_reset_rsrc_node(data, data->nr);
kvfree(data->nodes);
data->nodes = NULL;
data->nr = 0;
break;
}
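+ /* Drop any pending free-list entry before the node goes away */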
+ if (xa_erase(&ctx->rsrc_free, (unsigned long) node))
+ ctx->rsrc_free_nr--;
kfree(node);
}
io_free_file_tables(&ctx->file_table);
io_file_table_set_alloc_range(ctx, 0, 0);
+ io_reap_rsrc_nodes(ctx);
return 0;
}
if (!ctx->buf_table.nr)
return -ENXIO;
io_rsrc_data_free(&ctx->buf_table);
+ io_reap_rsrc_nodes(ctx);
return 0;
}
if (ret) {
kvfree(imu);
if (node)
- io_put_rsrc_node(node);
+ io_release_rsrc_node(node);
node = ERR_PTR(ret);
}
kvfree(pages);
out_put_free:
i = data.nr;
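+ /* Free any nodes that were set up before the failure */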
while (i--) {
- io_buffer_unmap(src_ctx, data.nodes[i]);
- kfree(data.nodes[i]);
+ if (data.nodes[i])
+ io_free_rsrc_node(data.nodes[i]);
}
out_unlock:
io_rsrc_data_free(&data);
};
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx, int type);
+void io_release_rsrc_node(struct io_rsrc_node *node);
+void __io_reap_rsrc_nodes(struct io_ring_ctx *ctx);
void io_free_rsrc_node(struct io_rsrc_node *node);
void io_rsrc_data_free(struct io_rsrc_data *data);
int io_rsrc_data_alloc(struct io_rsrc_data *data, unsigned nr);
return NULL;
}
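+/* Avoid calling out to reap unless nodes have been stashed for freeing */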
+static inline void io_reap_rsrc_nodes(struct io_ring_ctx *ctx)
+{
+ if (ctx->rsrc_free_nr)
+ __io_reap_rsrc_nodes(ctx);
+}
+
+/*
+ * Reaping is done at unregistration/removal time, so no zero-ref check
+ * is needed here - just drop the held reference.
+ */
static inline void io_put_rsrc_node(struct io_rsrc_node *node)
{
- if (node && !--node->refs)
- io_free_rsrc_node(node);
+ if (node)
+ node->refs--;
}
static inline bool io_reset_rsrc_node(struct io_rsrc_data *data, int index)
if (!node)
return false;
- io_put_rsrc_node(node);
+ io_release_rsrc_node(node);
data->nodes[index] = NULL;
return true;
}