forked from luck/tmp_suning_uos_patched
9f10523f89
Fix the state management of internal fscache operations and the accounting of what operations are in what states. This is done by: (1) Give struct fscache_operation a enum variable that directly represents the state it's currently in, rather than spreading this knowledge over a bunch of flags, who's processing the operation at the moment and whether it is queued or not. This makes it easier to write assertions to check the state at various points and to prevent invalid state transitions. (2) Add an 'operation complete' state and supply a function to indicate the completion of an operation (fscache_op_complete()) and make things call it. The final call to fscache_put_operation() can then check that an op in the appropriate state (complete or cancelled). (3) Adjust the use of object->n_ops, ->n_in_progress, ->n_exclusive to better govern the state of an object: (a) The ->n_ops is now the number of extant operations on the object and is now decremented by fscache_put_operation() only. (b) The ->n_in_progress is simply the number of objects that have been taken off of the object's pending queue for the purposes of being run. This is decremented by fscache_op_complete() only. (c) The ->n_exclusive is the number of exclusive ops that have been submitted and queued or are in progress. It is decremented by fscache_op_complete() and by fscache_cancel_op(). fscache_put_operation() and fscache_operation_gc() now no longer try to clean up ->n_exclusive and ->n_in_progress. That was leading to double decrements against fscache_cancel_op(). fscache_cancel_op() now no longer decrements ->n_ops. That was leading to double decrements against fscache_put_operation(). fscache_submit_exclusive_op() now decides whether it has to queue an op based on ->n_in_progress being > 0 rather than ->n_ops > 0 as the latter will persist in being true even after all preceding operations have been cancelled or completed. 
Furthermore, if an object is active and there are runnable ops against it, there must be at least one op running. (4) Add a remaining-pages counter (n_pages) to struct fscache_retrieval and provide a function to record completion of the pages as they complete. When n_pages reaches 0, the operation is deemed to be complete and fscache_op_complete() is called. Add calls to fscache_retrieval_complete() anywhere we've finished with a page we've been given to read or allocate for. This includes places where we just return pages to the netfs for reading from the server and where accessing the cache fails and we discard the proposed netfs page. The bugs in the unfixed state management manifest themselves as oopses like the following where the operation completion gets out of sync with return of the cookie by the netfs. This is possible because the cache unlocks and returns all the netfs pages before recording its completion - which means that there's nothing to stop the netfs discarding them and returning the cookie. FS-Cache: Cookie 'NFS.fh' still has outstanding reads ------------[ cut here ]------------ kernel BUG at fs/fscache/cookie.c:519! 
invalid opcode: 0000 [#1] SMP CPU 1 Modules linked in: cachefiles nfs fscache auth_rpcgss nfs_acl lockd sunrpc Pid: 400, comm: kswapd0 Not tainted 3.1.0-rc7-fsdevel+ #1090 /DG965RY RIP: 0010:[<ffffffffa007050a>] [<ffffffffa007050a>] __fscache_relinquish_cookie+0x170/0x343 [fscache] RSP: 0018:ffff8800368cfb00 EFLAGS: 00010282 RAX: 000000000000003c RBX: ffff880023cc8790 RCX: 0000000000000000 RDX: 0000000000002f2e RSI: 0000000000000001 RDI: ffffffff813ab86c RBP: ffff8800368cfb50 R08: 0000000000000002 R09: 0000000000000000 R10: ffff88003a1b7890 R11: ffff88001df6e488 R12: ffff880023d8ed98 R13: ffff880023cc8798 R14: 0000000000000004 R15: ffff88003b8bf370 FS: 0000000000000000(0000) GS:ffff88003bd00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 00000000008ba008 CR3: 0000000023d93000 CR4: 00000000000006e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 Process kswapd0 (pid: 400, threadinfo ffff8800368ce000, task ffff88003b8bf040) Stack: ffff88003b8bf040 ffff88001df6e528 ffff88001df6e528 ffffffffa00b46b0 ffff88003b8bf040 ffff88001df6e488 ffff88001df6e620 ffffffffa00b46b0 ffff88001ebd04c8 0000000000000004 ffff8800368cfb70 ffffffffa00b2c91 Call Trace: [<ffffffffa00b2c91>] nfs_fscache_release_inode_cookie+0x3b/0x47 [nfs] [<ffffffffa008f25f>] nfs_clear_inode+0x3c/0x41 [nfs] [<ffffffffa0090df1>] nfs4_evict_inode+0x2f/0x33 [nfs] [<ffffffff810d8d47>] evict+0xa1/0x15c [<ffffffff810d8e2e>] dispose_list+0x2c/0x38 [<ffffffff810d9ebd>] prune_icache_sb+0x28c/0x29b [<ffffffff810c56b7>] prune_super+0xd5/0x140 [<ffffffff8109b615>] shrink_slab+0x102/0x1ab [<ffffffff8109d690>] balance_pgdat+0x2f2/0x595 [<ffffffff8103e009>] ? process_timeout+0xb/0xb [<ffffffff8109dba3>] kswapd+0x270/0x289 [<ffffffff8104c5ea>] ? __init_waitqueue_head+0x46/0x46 [<ffffffff8109d933>] ? 
balance_pgdat+0x595/0x595 [<ffffffff8104bf7a>] kthread+0x7f/0x87 [<ffffffff813ad6b4>] kernel_thread_helper+0x4/0x10 [<ffffffff81026b98>] ? finish_task_switch+0x45/0xc0 [<ffffffff813abcdd>] ? retint_restore_args+0xe/0xe [<ffffffff8104befb>] ? __init_kthread_worker+0x53/0x53 [<ffffffff813ad6b0>] ? gs_change+0xb/0xb Signed-off-by: David Howells <dhowells@redhat.com>
497 lines
13 KiB
C
497 lines
13 KiB
C
/* FS-Cache worker operation management routines
|
|
*
|
|
* Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
|
|
* Written by David Howells (dhowells@redhat.com)
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License
|
|
* as published by the Free Software Foundation; either version
|
|
* 2 of the License, or (at your option) any later version.
|
|
*
|
|
* See Documentation/filesystems/caching/operations.txt
|
|
*/
|
|
|
|
#define FSCACHE_DEBUG_LEVEL OPERATION
|
|
#include <linux/module.h>
|
|
#include <linux/seq_file.h>
|
|
#include <linux/slab.h>
|
|
#include "internal.h"
|
|
|
|
/* Monotonic source of per-operation debug IDs (stamped into op->debug_id and
 * printed in the _enter/_debug traces below); exported for use outside this
 * module. */
atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);
|
|
|
|
/**
|
|
* fscache_enqueue_operation - Enqueue an operation for processing
|
|
* @op: The operation to enqueue
|
|
*
|
|
* Enqueue an operation for processing by the FS-Cache thread pool.
|
|
*
|
|
* This will get its own ref on the object.
|
|
*/
|
|
void fscache_enqueue_operation(struct fscache_operation *op)
|
|
{
|
|
_enter("{OBJ%x OP%x,%u}",
|
|
op->object->debug_id, op->debug_id, atomic_read(&op->usage));
|
|
|
|
ASSERT(list_empty(&op->pend_link));
|
|
ASSERT(op->processor != NULL);
|
|
ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
|
|
ASSERTCMP(atomic_read(&op->usage), >, 0);
|
|
ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
|
|
|
|
fscache_stat(&fscache_n_op_enqueue);
|
|
switch (op->flags & FSCACHE_OP_TYPE) {
|
|
case FSCACHE_OP_ASYNC:
|
|
_debug("queue async");
|
|
atomic_inc(&op->usage);
|
|
if (!queue_work(fscache_op_wq, &op->work))
|
|
fscache_put_operation(op);
|
|
break;
|
|
case FSCACHE_OP_MYTHREAD:
|
|
_debug("queue for caller's attention");
|
|
break;
|
|
default:
|
|
printk(KERN_ERR "FS-Cache: Unexpected op type %lx",
|
|
op->flags);
|
|
BUG();
|
|
break;
|
|
}
|
|
}
|
|
EXPORT_SYMBOL(fscache_enqueue_operation);
|
|
|
|
/*
 * start an op running
 * - caller must hold object->lock (guards n_in_progress; both callers do)
 * - the op must currently be in the PENDING state
 */
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);

	op->state = FSCACHE_OP_ST_IN_PROGRESS;
	object->n_in_progress++;
	/* wake anyone sleeping on this op's WAITING bit, waiting for it to be
	 * given a chance to run */
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
	/* ops without a processor (FSCACHE_OP_MYTHREAD style) are executed by
	 * the woken waiter rather than the thread pool */
	if (op->processor)
		fscache_enqueue_operation(op);
	fscache_stat(&fscache_n_op_run);
}
|
|
|
|
/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 * - takes and releases object->lock itself; caller must not hold it
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
				struct fscache_operation *op)
{
	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

	/* the op must not have been submitted anywhere yet */
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	op->state = FSCACHE_OP_ST_PENDING;
	if (fscache_object_is_active(object)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */

		if (object->n_in_progress > 0) {
			/* something is running: queue behind it (keyed off
			 * n_in_progress, not n_ops - completed/cancelled ops
			 * may still be counted in n_ops).  The pending queue
			 * takes its own ref on the op. */
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			/* nothing running but ops are queued: join the tail
			 * and kick the object into draining the queue */
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			/* completely idle: run the exclusive op right away */
			ASSERTCMP(object->n_in_progress, ==, 0);
			fscache_run_op(object, op);
		}

		/* need to issue a new write op after this */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	} else if (object->state == FSCACHE_OBJECT_CREATING) {
		/* object not yet usable: park the op on the pending queue
		 * until the object becomes active */
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
	} else {
		/* not allowed to submit ops in any other state */
		BUG();
	}

	spin_unlock(&object->lock);
	return 0;
}
|
|
|
|
/*
|
|
* report an unexpected submission
|
|
*/
|
|
static void fscache_report_unexpected_submission(struct fscache_object *object,
|
|
struct fscache_operation *op,
|
|
unsigned long ostate)
|
|
{
|
|
static bool once_only;
|
|
struct fscache_operation *p;
|
|
unsigned n;
|
|
|
|
if (once_only)
|
|
return;
|
|
once_only = true;
|
|
|
|
kdebug("unexpected submission OP%x [OBJ%x %s]",
|
|
op->debug_id, object->debug_id,
|
|
fscache_object_states[object->state]);
|
|
kdebug("objstate=%s [%s]",
|
|
fscache_object_states[object->state],
|
|
fscache_object_states[ostate]);
|
|
kdebug("objflags=%lx", object->flags);
|
|
kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
|
|
kdebug("ops=%u inp=%u exc=%u",
|
|
object->n_ops, object->n_in_progress, object->n_exclusive);
|
|
|
|
if (!list_empty(&object->pending_ops)) {
|
|
n = 0;
|
|
list_for_each_entry(p, &object->pending_ops, pend_link) {
|
|
ASSERTCMP(p->object, ==, object);
|
|
kdebug("%p %p", op->processor, op->release);
|
|
n++;
|
|
}
|
|
|
|
kdebug("n=%u", n);
|
|
}
|
|
|
|
dump_stack();
|
|
}
|
|
|
|
/*
 * submit an operation for an object
 * - objects may be submitted only in the following states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 * - returns 0 on acceptance or -ENOBUFS if the op was rejected (in which case
 *   the op is left in the CANCELLED state)
 */
int fscache_submit_op(struct fscache_object *object,
		      struct fscache_operation *op)
{
	unsigned long ostate;
	int ret;

	_enter("{OBJ%x OP%x},{%u}",
	       object->debug_id, op->debug_id, atomic_read(&op->usage));

	/* the op must not have been submitted anywhere yet */
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	/* snapshot the state for the diagnostic dump below, ordered before
	 * the activity check */
	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	if (fscache_object_is_active(object)) {
		op->object = object;
		object->n_ops++;

		if (object->n_exclusive > 0) {
			/* an exclusive op is queued or running; this op must
			 * wait behind it.  The pending queue takes a ref. */
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			/* preserve FIFO order behind already-pending ops and
			 * kick the queue */
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			/* nothing in the way: run immediately */
			ASSERTCMP(object->n_exclusive, ==, 0);
			fscache_run_op(object, op);
		}
		ret = 0;
	} else if (object->state == FSCACHE_OBJECT_CREATING) {
		/* object still being set up: queue the op for later */
		op->object = object;
		object->n_ops++;
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (object->state == FSCACHE_OBJECT_DYING ||
		   object->state == FSCACHE_OBJECT_LC_DYING ||
		   object->state == FSCACHE_OBJECT_WITHDRAWING) {
		/* object is on its way out: reject the op outright */
		fscache_stat(&fscache_n_op_rejected);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
		/* unexpected object state with no I/O error to excuse it:
		 * dump diagnostics (once) and refuse */
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		/* cache suffered an I/O error: quietly refuse */
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
|
|
|
|
/*
 * queue an object for withdrawal on error, aborting all following asynchronous
 * operations
 * - just raises the error event; the object event handler (elsewhere in
 *   fscache) presumably performs the actual withdrawal asynchronously
 */
void fscache_abort_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}
|
|
|
|
/*
 * jump start the operation processing on an object
 * - caller must hold object->lock
 * - drains the pending queue in FIFO order, stopping at an exclusive op
 */
void fscache_start_operations(struct fscache_object *object)
{
	struct fscache_operation *op;
	bool stop = false;

	while (!list_empty(&object->pending_ops) && !stop) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			/* an exclusive op may only start when nothing else is
			 * running, and nothing further may start behind it */
			if (object->n_in_progress > 0)
				break;
			stop = true;
		}
		list_del_init(&op->pend_link);
		fscache_run_op(object, op);

		/* the pending queue was holding a ref on the op */
		fscache_put_operation(op);
	}

	ASSERTCMP(object->n_in_progress, <=, object->n_ops);

	_debug("woke %d ops on OBJ%x",
	       object->n_in_progress, object->debug_id);
}
|
|
|
|
/*
 * cancel an operation that's pending on an object
 * - returns 0 if cancelled, -EBUSY if the op had already left PENDING state
 * - deliberately does not touch object->n_ops: that is decremented solely by
 *   fscache_put_operation() to avoid double decrements
 */
int fscache_cancel_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id);

	/* must have been submitted, and not already cancelled */
	ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
	ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);

	ret = -EBUSY;
	if (op->state == FSCACHE_OP_ST_PENDING) {
		ASSERT(!list_empty(&op->pend_link));
		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);
		op->state = FSCACHE_OP_ST_CANCELLED;
		/* a pending exclusive op was counted in n_exclusive at submit
		 * time and will never reach fscache_op_complete(), so the
		 * counter must be undone here */
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		/* drop the ref the pending queue was holding */
		fscache_put_operation(op);
		ret = 0;
	}

	spin_unlock(&object->lock);
	_leave(" = %d", ret);
	return ret;
}
|
|
|
|
/*
 * Record the completion of an in-progress operation.
 * - moves the op from IN_PROGRESS to COMPLETE and gives back the object's
 *   n_in_progress (and, for exclusive ops, n_exclusive) slot
 * - restarts the pending queue if this was the last running op
 */
void fscache_op_complete(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;

	_enter("OBJ%x", object->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
	ASSERTCMP(object->n_in_progress, >, 0);
	/* an exclusive op must have been running on its own */
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_exclusive, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_in_progress, ==, 1);

	spin_lock(&object->lock);

	op->state = FSCACHE_OP_ST_COMPLETE;

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
		object->n_exclusive--;
	object->n_in_progress--;
	/* nothing running any more: let queued ops (including a queued
	 * exclusive op) proceed */
	if (object->n_in_progress == 0)
		fscache_start_operations(object);

	spin_unlock(&object->lock);
	_leave("");
}
EXPORT_SYMBOL(fscache_op_complete);
|
|
|
|
/*
 * release an operation
 * - queues pending ops if this is the last in-progress op
 * - the final put requires the op to be COMPLETE or CANCELLED; it then frees
 *   the op and is the sole place object->n_ops is given back (apart from the
 *   deferred GC path, which mirrors it)
 */
void fscache_put_operation(struct fscache_operation *op)
{
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	if (!atomic_dec_and_test(&op->usage))
		return;

	_debug("PUT OP");
	/* the last ref may only be dropped on a completed or cancelled op */
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_COMPLETE,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);
	op->state = FSCACHE_OP_ST_DEAD;

	fscache_stat(&fscache_n_op_release);

	if (op->release) {
		op->release(op);
		op->release = NULL;
	}

	object = op->object;

	/* if this op was counted against the object's outstanding reads, wake
	 * anyone waiting on the cookie for those reads to drain */
	if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) {
		if (atomic_dec_and_test(&object->n_reads)) {
			clear_bit(FSCACHE_COOKIE_WAITING_ON_READS,
				  &object->cookie->flags);
			wake_up_bit(&object->cookie->flags,
				    FSCACHE_COOKIE_WAITING_ON_READS);
		}
	}

	/* now... we may get called with the object spinlock held, so we
	 * complete the cleanup here only if we can immediately acquire the
	 * lock, and defer it otherwise */
	if (!spin_trylock(&object->lock)) {
		_debug("defer put");
		fscache_stat(&fscache_n_op_deferred_release);

		/* pend_link is safe to reuse for the GC list: usage has hit
		 * zero, so the op is no longer on any pending queue */
		cache = object->cache;
		spin_lock(&cache->op_gc_list_lock);
		list_add_tail(&op->pend_link, &cache->op_gc_list);
		spin_unlock(&cache->op_gc_list_lock);
		schedule_work(&cache->op_gc);
		_leave(" [defer]");
		return;
	}

	/* give back the object's extant-op count; the last op going away
	 * raises EV_CLEARED for the object state machine */
	ASSERTCMP(object->n_ops, >, 0);
	object->n_ops--;
	if (object->n_ops == 0)
		fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

	spin_unlock(&object->lock);

	kfree(op);
	_leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);
|
|
|
|
/*
 * garbage collect operations that have had their release deferred
 * - handles the ops that fscache_put_operation() parked on op_gc_list because
 *   it couldn't take object->lock at the time
 * - processes a bounded batch per invocation, rescheduling itself if more
 *   ops remain
 */
void fscache_operation_gc(struct work_struct *work)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	struct fscache_cache *cache =
		container_of(work, struct fscache_cache, op_gc);
	int count = 0;

	_enter("");

	do {
		spin_lock(&cache->op_gc_list_lock);
		if (list_empty(&cache->op_gc_list)) {
			spin_unlock(&cache->op_gc_list_lock);
			break;
		}

		/* deferred ops are chained through pend_link (reused once the
		 * op is off all pending queues) */
		op = list_entry(cache->op_gc_list.next,
				struct fscache_operation, pend_link);
		list_del(&op->pend_link);
		spin_unlock(&cache->op_gc_list_lock);

		object = op->object;
		spin_lock(&object->lock);

		_debug("GC DEFERRED REL OBJ%x OP%x",
		       object->debug_id, op->debug_id);
		fscache_stat(&fscache_n_op_gc);

		/* only fully-released, dead ops may reach this list */
		ASSERTCMP(atomic_read(&op->usage), ==, 0);
		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);

		/* mirror of the inline cleanup in fscache_put_operation() */
		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
		kfree(op);

	} while (count++ < 20);

	/* more work left: requeue ourselves rather than hogging the worker */
	if (!list_empty(&cache->op_gc_list))
		schedule_work(&cache->op_gc);

	_leave("");
}
|
|
|
|
/*
|
|
* execute an operation using fs_op_wq to provide processing context -
|
|
* the caller holds a ref to this object, so we don't need to hold one
|
|
*/
|
|
void fscache_op_work_func(struct work_struct *work)
|
|
{
|
|
struct fscache_operation *op =
|
|
container_of(work, struct fscache_operation, work);
|
|
unsigned long start;
|
|
|
|
_enter("{OBJ%x OP%x,%d}",
|
|
op->object->debug_id, op->debug_id, atomic_read(&op->usage));
|
|
|
|
ASSERT(op->processor != NULL);
|
|
start = jiffies;
|
|
op->processor(op);
|
|
fscache_hist(fscache_ops_histogram, start);
|
|
fscache_put_operation(op);
|
|
|
|
_leave("");
|
|
}
|