atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);
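+/* Default cancellation handler: a no-op, assigned when the caller supplies
+ * no cancel function so that op->cancel() can always be invoked without a
+ * NULL check.
+ */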
+static void fscache_operation_dummy_cancel(struct fscache_operation *op)
+{
+}
+
+/**
+ * fscache_operation_init - Do basic initialisation of an operation
+ * @op: The operation to initialise
+ * @processor: The function to drive the operation
+ * @cancel: A cancellation handler, or NULL to fall back to a no-op handler
+ * @release: The release function to assign
+ *
+ * Do basic initialisation of an operation.  The caller must still set flags
+ * and the object as needed.
+ */
+void fscache_operation_init(struct fscache_operation *op,
+			    fscache_operation_processor_t processor,
+			    fscache_operation_cancel_t cancel,
+			    fscache_operation_release_t release)
+{
+	INIT_WORK(&op->work, fscache_op_work_func);
+	atomic_set(&op->usage, 1);
+	op->state = FSCACHE_OP_ST_INITIALISED;
+	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
+	op->processor = processor;
+	op->cancel = cancel ?: fscache_operation_dummy_cancel;
+	op->release = release;
+	INIT_LIST_HEAD(&op->pend_link);
+	fscache_stat(&fscache_n_op_initialised);
+}
+EXPORT_SYMBOL(fscache_operation_init);
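With this change a caller supplies its cancellation handler at initialisation
time instead of handing one to fscache_cancel_op() later.  A minimal sketch of
a caller (the my_read_* names are hypothetical, for illustration only):

	/* Hypothetical caller: wire up processor, cancel and release at
	 * initialisation; op is a struct fscache_operation *. */
	fscache_operation_init(op, my_read_processor, my_read_cancel,
			       my_read_release);

Passing NULL for the cancel handler is also fine; the ?: fallback above
substitutes fscache_operation_dummy_cancel.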
+
/**
* fscache_enqueue_operation - Enqueue an operation for processing
* @op: The operation to enqueue
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
+		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
+		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
+		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
+		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
+		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
+		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
+		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
+		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}
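Each rejection path above follows the same two-step pattern: run op->cancel()
while the operation is still in its old state, then mark it cancelled.  A
hypothetical helper (not part of this patch) would capture the invariant:

	/* Sketch only: op->cancel() must precede the state change so the
	 * handler sees the operation before it is marked CANCELLED. */
	static void fscache_do_cancel(struct fscache_operation *op)
	{
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
	}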
 * cancel an operation that's pending on an object (or, if asked to, one
 * that is already in progress)
 */
int fscache_cancel_op(struct fscache_operation *op,
-		      void (*do_cancel)(struct fscache_operation *))
+		      bool cancel_in_progress_op)
{
	struct fscache_object *object = op->object;
+	bool put = false;
	int ret;

	_enter("OBJ%x OP%x", op->object->debug_id, op->debug_id);
	ret = -EBUSY;
	if (op->state == FSCACHE_OP_ST_PENDING) {
		ASSERT(!list_empty(&op->pend_link));
-		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);
-		if (do_cancel)
-			do_cancel(op);
+		put = true;
+
+		fscache_stat(&fscache_n_op_cancelled);
+		op->cancel(op);
+		op->state = FSCACHE_OP_ST_CANCELLED;
+		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
+			object->n_exclusive--;
+		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
+			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
+		ret = 0;
+	} else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) {
+		ASSERTCMP(object->n_in_progress, >, 0);
+		object->n_in_progress--;
+		if (object->n_in_progress == 0)
+			fscache_start_operations(object);
+
+		fscache_stat(&fscache_n_op_cancelled);
+		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
-		fscache_put_operation(op);
		ret = 0;
	}

+	if (put)
+		fscache_put_operation(op);
	spin_unlock(&object->lock);
	_leave(" = %d", ret);
	return ret;
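Callers now opt in to tearing down a running operation via the second
argument; a sketch of the intended usage (the waiter is hypothetical):

	/* Pass false to cancel only while still pending; -EBUSY means the
	 * operation already started (or finished) and must be waited for. */
	if (fscache_cancel_op(op, false) < 0)
		my_wait_for_op(op);	/* hypothetical waiter */

Passing true additionally covers FSCACHE_OP_ST_IN_PROGRESS, in which case
n_in_progress is dropped and queued operations may be restarted.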
		list_del_init(&op->pend_link);

		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
+		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
	spin_lock(&object->lock);

-	op->state = cancelled ?
-		FSCACHE_OP_ST_CANCELLED : FSCACHE_OP_ST_COMPLETE;
+	if (!cancelled) {
+		op->state = FSCACHE_OP_ST_COMPLETE;
+	} else {
+		op->cancel(op);
+		op->state = FSCACHE_OP_ST_CANCELLED;
+	}

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
		object->n_exclusive--;

	return;
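For a cache backend, fscache_op_complete() is now the single call that
reports either outcome; a hedged sketch of a backend's completion path:

	/* On success: */
	fscache_op_complete(op, false);	/* -> FSCACHE_OP_ST_COMPLETE */

	/* On failure: op->cancel() runs before the CANCELLED transition. */
	fscache_op_complete(op, true);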
_debug("PUT OP");
- ASSERTIFCMP(op->state != FSCACHE_OP_ST_COMPLETE,
+ ASSERTIFCMP(op->state != FSCACHE_OP_ST_INITIALISED &&
+ op->state != FSCACHE_OP_ST_COMPLETE,
op->state, ==, FSCACHE_OP_ST_CANCELLED);
- op->state = FSCACHE_OP_ST_DEAD;
fscache_stat(&fscache_n_op_release);
op->release(op);
op->release = NULL;
}
+ op->state = FSCACHE_OP_ST_DEAD;
	object = op->object;
+	if (likely(object)) {
+		if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
+			atomic_dec(&object->n_reads);
+		if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
+			fscache_unuse_cookie(object);
+
+		/* now... we may get called with the object spinlock held, so we
+		 * complete the cleanup here only if we can immediately acquire the
+		 * lock, and defer it otherwise */
+		if (!spin_trylock(&object->lock)) {
+			_debug("defer put");
+			fscache_stat(&fscache_n_op_deferred_release);
+
+			cache = object->cache;
+			spin_lock(&cache->op_gc_list_lock);
+			list_add_tail(&op->pend_link, &cache->op_gc_list);
+			spin_unlock(&cache->op_gc_list_lock);
+			schedule_work(&cache->op_gc);
+			_leave(" [defer]");
+			return;
+		}
-	if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
-		atomic_dec(&object->n_reads);
-	if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
-		fscache_unuse_cookie(object);
-
-	/* now... we may get called with the object spinlock held, so we
-	 * complete the cleanup here only if we can immediately acquire the
-	 * lock, and defer it otherwise */
-	if (!spin_trylock(&object->lock)) {
-		_debug("defer put");
-		fscache_stat(&fscache_n_op_deferred_release);

+		ASSERTCMP(object->n_ops, >, 0);
+		object->n_ops--;
+		if (object->n_ops == 0)
+			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);
-		cache = object->cache;
-		spin_lock(&cache->op_gc_list_lock);
-		list_add_tail(&op->pend_link, &cache->op_gc_list);
-		spin_unlock(&cache->op_gc_list_lock);
-		schedule_work(&cache->op_gc);
-		_leave(" [defer]");
-		return;
+		spin_unlock(&object->lock);
	}

-	ASSERTCMP(object->n_ops, >, 0);
-	object->n_ops--;
-	if (object->n_ops == 0)
-		fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);
-
-	spin_unlock(&object->lock);
-
	kfree(op);
	_leave(" [done]");
}
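Taken together, the operation life cycle after this patch reads as follows
(a summary of the code above, not text from the patch itself):

	/*
	 * INITIALISED --> PENDING --> IN_PROGRESS --> COMPLETE --.
	 *      |             |             |                     |
	 *      v             v             v                     |
	 *   CANCELLED <------+-------------'                     |
	 *      |                                                 |
	 *      `----------------> DEAD <-------------------------'
	 *
	 * op->cancel() runs on every transition into CANCELLED; op->release()
	 * runs once on the way to DEAD (also directly from INITIALISED, as
	 * the relaxed ASSERTIFCMP above now permits).
	 */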