/* General filesystem caching backing cache interface
*
* Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* NOTE!!! See:
*
* Documentation/filesystems/caching/backend-api.txt
*
* for a description of the cache backend interface declared here.
*/
#ifndef _LINUX_FSCACHE_CACHE_H
#define _LINUX_FSCACHE_CACHE_H
#include <linux/fscache.h>
#include <linux/sched.h>
#include <linux/slow-work.h>
#define NR_MAXCACHES BITS_PER_LONG
struct fscache_cache;
struct fscache_cache_ops;
struct fscache_object;
struct fscache_operation;
/*
* cache tag definition
*/
struct fscache_cache_tag {
struct list_head link;
struct fscache_cache *cache; /* cache referred to by this tag */
unsigned long flags;
#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */
atomic_t usage;
char name[0]; /* tag name */
};
/*
* cache definition
*/
struct fscache_cache {
const struct fscache_cache_ops *ops;
struct fscache_cache_tag *tag; /* tag representing this cache */
struct kobject *kobj; /* system representation of this cache */
struct list_head link; /* link in list of caches */
size_t max_index_size; /* maximum size of index data */
char identifier[36]; /* cache label */
/* node management */
struct work_struct op_gc; /* operation garbage collector */
struct list_head object_list; /* list of data/index objects */
struct list_head op_gc_list; /* list of ops to be deleted */
spinlock_t object_list_lock;
spinlock_t op_gc_list_lock;
atomic_t object_count; /* no. of live objects in this cache */
struct fscache_object *fsdef; /* object for the fsdef index */
unsigned long flags;
#define FSCACHE_IOERROR 0 /* cache stopped on I/O error */
#define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */
};
extern wait_queue_head_t fscache_cache_cleared_wq;
/*
* operation to be applied to a cache object
* - retrieval initiation operations are done in the context of the process
* that issued them, and not in an async thread pool
*/
typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
struct fscache_operation {
union {
struct work_struct fast_work; /* record for fast ops */
struct slow_work slow_work; /* record for (very) slow ops */
};
struct list_head pend_link; /* link in object->pending_ops */
struct fscache_object *object; /* object to be operated upon */
unsigned long flags;
#define FSCACHE_OP_TYPE 0x000f /* operation type */
#define FSCACHE_OP_FAST 0x0001 /* - fast op, processor may not sleep for disk */
#define FSCACHE_OP_SLOW 0x0002 /* - (very) slow op, processor may sleep for disk */
#define FSCACHE_OP_MYTHREAD 0x0003 /* - processing is done by issuing thread, not pool */
#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
#define FSCACHE_OP_DEAD 6 /* op is now dead */
#define FSCACHE_OP_DEC_READ_CNT 7 /* decrement object->n_reads on destruction */
#define FSCACHE_OP_KEEP_FLAGS 0xc0 /* flags to keep when repurposing an op */
atomic_t usage;
unsigned debug_id; /* debugging ID */
/* operation processor callback
* - can be NULL if FSCACHE_OP_WAITING is going to be used to perform
* the op in a non-pool thread */
fscache_operation_processor_t processor;
/* operation releaser */
fscache_operation_release_t release;
#ifdef CONFIG_SLOW_WORK_PROC
const char *name; /* operation name */
const char *state; /* operation state */
#define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0)
#define fscache_set_op_state(OP, S) do { (OP)->state = (S); } while(0)
#else
#define fscache_set_op_name(OP, N) do { } while(0)
#define fscache_set_op_state(OP, S) do { } while(0)
#endif
};
extern atomic_t fscache_op_debug_id;
extern const struct slow_work_ops fscache_op_slow_work_ops;
extern void fscache_enqueue_operation(struct fscache_operation *);
extern void fscache_put_operation(struct fscache_operation *);
/**
* fscache_operation_init - Do basic initialisation of an operation
* @op: The operation to initialise
* @release: The release function to assign
*
* Do basic initialisation of an operation. The caller must still set flags,
* object, either fast_work or slow_work if necessary, and processor if needed.
*/
static inline void fscache_operation_init(struct fscache_operation *op,
fscache_operation_release_t release)
{
atomic_set(&op->usage, 1);
op->debug_id = atomic_inc_return(&fscache_op_debug_id);
op->release = release;
INIT_LIST_HEAD(&op->pend_link);
fscache_set_op_state(op, "Init");
}
/**
* fscache_operation_init_slow - Do additional initialisation of a slow op
* @op: The operation to initialise
* @processor: The processor function to assign
*
* Do additional initialisation of an operation as required for slow work.
*/
static inline
void fscache_operation_init_slow(struct fscache_operation *op,
fscache_operation_processor_t processor)
{
op->processor = processor;
slow_work_init(&op->slow_work, &fscache_op_slow_work_ops);
}
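/*
* Example (illustrative sketch, not part of the original interface): a slow
* operation is set up in two steps - fscache_operation_init() for the common
* fields, then fscache_operation_init_slow() to attach the processor and the
* slow-work record.  The callback names, the allocation and the exact flag
* choice below are assumptions for illustration only:
*
*    static void my_op_processor(struct fscache_operation *op);
*    static void my_op_release(struct fscache_operation *op);
*
*    op = kzalloc(sizeof(*op), GFP_KERNEL);
*    if (!op)
*        return -ENOMEM;
*    fscache_operation_init(op, my_op_release);
*    fscache_operation_init_slow(op, my_op_processor);
*    op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
*/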
/*
* data read operation
*/
struct fscache_retrieval {
struct fscache_operation op;
struct address_space *mapping; /* netfs pages */
fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
void *context; /* netfs read context (pinned) */
struct list_head to_do; /* list of things to be done by the backend */
unsigned long start_time; /* time at which retrieval started */
};
typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op,
struct page *page,
gfp_t gfp);
typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op,
struct list_head *pages,
unsigned *nr_pages,
gfp_t gfp);
/**
* fscache_get_retrieval - Get an extra reference on a retrieval operation
* @op: The retrieval operation to get a reference on
*
* Get an extra reference on a retrieval operation.
*/
static inline
struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op)
{
atomic_inc(&op->op.usage);
return op;
}
/**
* fscache_enqueue_retrieval - Enqueue a retrieval operation for processing
* @op: The retrieval operation affected
*
* Enqueue a retrieval operation for processing by the FS-Cache thread pool.
*/
static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
{
fscache_enqueue_operation(&op->op);
}
/**
* fscache_put_retrieval - Drop a reference to a retrieval operation
* @op: The retrieval operation affected
*
* Drop a reference to a retrieval operation.
*/
static inline void fscache_put_retrieval(struct fscache_retrieval *op)
{
fscache_put_operation(&op->op);
}
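/*
* Example (illustrative sketch): a backend read path that defers the actual
* I/O should pin the retrieval record across the deferral with
* fscache_get_retrieval() and balance it with fscache_put_retrieval() once the
* deferred work has completed.  The queueing helper below is hypothetical:
*
*    fscache_get_retrieval(op);
*    if (my_queue_async_read(object, page, op) < 0) {
*        fscache_put_retrieval(op);
*        return -ENOBUFS;
*    }
*    return 0;
*
* The asynchronous completion path then reports the result through
* fscache_end_io() (see below) and drops its reference with
* fscache_put_retrieval().
*/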
/*
* cached page storage work item
* - used to do three things:
* - batch writes to the cache
* - do cache writes asynchronously
* - defer writes until cache object lookup completion
*/
struct fscache_storage {
struct fscache_operation op;
pgoff_t store_limit; /* don't write more than this */
};
/*
* cache operations
*/
struct fscache_cache_ops {
/* name of cache provider */
const char *name;
/* allocate an object record for a cookie */
struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
struct fscache_cookie *cookie);
/* look up the object for a cookie
* - return -ETIMEDOUT to be requeued
*/
int (*lookup_object)(struct fscache_object *object);
/* finished looking up */
void (*lookup_complete)(struct fscache_object *object);
/* increment the usage count on this object (may fail if unmounting) */
struct fscache_object *(*grab_object)(struct fscache_object *object);
/* pin an object in the cache */
int (*pin_object)(struct fscache_object *object);
/* unpin an object in the cache */
void (*unpin_object)(struct fscache_object *object);
/* store the updated auxiliary data on an object */
void (*update_object)(struct fscache_object *object);
/* discard the resources pinned by an object and effect retirement if
* necessary */
void (*drop_object)(struct fscache_object *object);
/* dispose of a reference to an object */
void (*put_object)(struct fscache_object *object);
/* sync a cache */
void (*sync_cache)(struct fscache_cache *cache);
/* notification that the attributes of a non-index object (such as
* i_size) have changed */
int (*attr_changed)(struct fscache_object *object);
/* reserve space for an object's data and associated metadata */
int (*reserve_space)(struct fscache_object *object, loff_t i_size);
/* request a backing block for a page be read or allocated in the
* cache */
fscache_page_retrieval_func_t read_or_alloc_page;
/* request backing blocks for a list of pages be read or allocated in
* the cache */
fscache_pages_retrieval_func_t read_or_alloc_pages;
/* request a backing block for a page be allocated in the cache so that
* it can be written directly */
fscache_page_retrieval_func_t allocate_page;
/* request backing blocks for pages be allocated in the cache so that
* they can be written directly */
fscache_pages_retrieval_func_t allocate_pages;
/* write a page to its backing block in the cache */
int (*write_page)(struct fscache_storage *op, struct page *page);
/* detach backing block from a page (optional)
* - must release the cookie lock before returning
* - may sleep
*/
void (*uncache_page)(struct fscache_object *object,
struct page *page);
/* dissociate a cache from all the pages it was backing */
void (*dissociate_pages)(struct fscache_cache *cache);
};
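/*
* Example (illustrative sketch): a backend describes itself to FS-Cache by
* filling in a static ops table and handing it to fscache_init_cache() (see
* the end of this header).  The "mycache" function names are hypothetical;
* operations a backend does not support (pinning, space reservation) appear
* to be left unset:
*
*    static const struct fscache_cache_ops mycache_ops = {
*        .name                = "mycache",
*        .alloc_object        = mycache_alloc_object,
*        .lookup_object       = mycache_lookup_object,
*        .lookup_complete     = mycache_lookup_complete,
*        .grab_object         = mycache_grab_object,
*        .update_object       = mycache_update_object,
*        .drop_object         = mycache_drop_object,
*        .put_object          = mycache_put_object,
*        .sync_cache          = mycache_sync_cache,
*        .attr_changed        = mycache_attr_changed,
*        .read_or_alloc_page  = mycache_read_or_alloc_page,
*        .read_or_alloc_pages = mycache_read_or_alloc_pages,
*        .allocate_page       = mycache_allocate_page,
*        .allocate_pages      = mycache_allocate_pages,
*        .write_page          = mycache_write_page,
*        .uncache_page        = mycache_uncache_page,
*        .dissociate_pages    = mycache_dissociate_pages,
*    };
*/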
/*
* data file or index object cookie
* - a file will only appear in one cache
* - a request to cache a file may or may not be honoured, subject to
* constraints such as disk space
* - indices are created on disk just-in-time
*/
struct fscache_cookie {
atomic_t usage; /* number of users of this cookie */
atomic_t n_children; /* number of children of this cookie */
spinlock_t lock;
spinlock_t stores_lock; /* lock on page store tree */
struct hlist_head backing_objects; /* object(s) backing this file/index */
const struct fscache_cookie_def *def; /* definition */
struct fscache_cookie *parent; /* parent of this entry */
void *netfs_data; /* back pointer to netfs */
struct radix_tree_root stores; /* pages to be stored on this cookie */
#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
unsigned long flags;
#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
#define FSCACHE_COOKIE_CREATING 1 /* T if non-index object being created still */
#define FSCACHE_COOKIE_NO_DATA_YET 2 /* T if new object with no cached data yet */
#define FSCACHE_COOKIE_PENDING_FILL 3 /* T if pending initial fill on object */
#define FSCACHE_COOKIE_FILLING 4 /* T if filling object incrementally */
#define FSCACHE_COOKIE_UNAVAILABLE 5 /* T if cookie is unavailable (error, etc) */
};
extern struct fscache_cookie fscache_fsdef_index;
/*
* on-disk cache file or index handle
*/
struct fscache_object {
enum fscache_object_state {
FSCACHE_OBJECT_INIT, /* object in initial unbound state */
FSCACHE_OBJECT_LOOKING_UP, /* looking up object */
FSCACHE_OBJECT_CREATING, /* creating object */
/* active states */
FSCACHE_OBJECT_AVAILABLE, /* cleaning up object after creation */
FSCACHE_OBJECT_ACTIVE, /* object is usable */
FSCACHE_OBJECT_UPDATING, /* object is updating */
/* terminal states */
FSCACHE_OBJECT_DYING, /* object waiting for accessors to finish */
FSCACHE_OBJECT_LC_DYING, /* object cleaning up after lookup/create */
FSCACHE_OBJECT_ABORT_INIT, /* abort the init state */
FSCACHE_OBJECT_RELEASING, /* releasing object */
FSCACHE_OBJECT_RECYCLING, /* retiring object */
FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */
FSCACHE_OBJECT_DEAD, /* object is now dead */
FSCACHE_OBJECT__NSTATES
} state;
int debug_id; /* debugging ID */
int n_children; /* number of child objects */
int n_ops; /* number of ops outstanding on object */
int n_obj_ops; /* number of object ops outstanding on object */
int n_in_progress; /* number of ops in progress */
int n_exclusive; /* number of exclusive ops queued */
atomic_t n_reads; /* number of read ops in progress */
spinlock_t lock; /* state and operations lock */
unsigned long lookup_jif; /* time at which lookup started */
unsigned long event_mask; /* events this object is interested in */
unsigned long events; /* events to be processed by this object
* (order is important - using fls) */
#define FSCACHE_OBJECT_EV_REQUEUE 0 /* T if object should be requeued */
#define FSCACHE_OBJECT_EV_UPDATE 1 /* T if object should be updated */
#define FSCACHE_OBJECT_EV_CLEARED 2 /* T if accessors all gone */
#define FSCACHE_OBJECT_EV_ERROR 3 /* T if fatal error occurred during processing */
#define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */
#define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */
#define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */
#define FSCACHE_OBJECT_EVENTS_MASK 0x7f /* mask of all events */
unsigned long flags;
#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
#define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */
#define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */
struct list_head cache_link; /* link in cache->object_list */
struct hlist_node cookie_link; /* link in cookie->backing_objects */
struct fscache_cache *cache; /* cache that supplied this object */
struct fscache_cookie *cookie; /* netfs's file/index object */
struct fscache_object *parent; /* parent object */
struct slow_work work; /* attention scheduling record */
struct list_head dependents; /* FIFO of dependent objects */
struct list_head dep_link; /* link in parent's dependents list */
struct list_head pending_ops; /* unstarted operations on this object */
#ifdef CONFIG_FSCACHE_OBJECT_LIST
struct rb_node objlist_link; /* link in global object list */
#endif
pgoff_t store_limit; /* current storage limit */
loff_t store_limit_l; /* current storage limit */
};
extern const char *fscache_object_states[];
#define fscache_object_is_active(obj) \
(!test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \
(obj)->state >= FSCACHE_OBJECT_AVAILABLE && \
(obj)->state < FSCACHE_OBJECT_DYING)
#define fscache_object_is_dead(obj) \
(test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \
(obj)->state >= FSCACHE_OBJECT_DYING)
extern const struct slow_work_ops fscache_object_slow_work_ops;
/**
* fscache_object_init - Initialise a cache object description
* @object: Object description
*
* Initialise a cache object description to its basic values.
*
* See Documentation/filesystems/caching/backend-api.txt for a complete
* description.
*/
static inline
void fscache_object_init(struct fscache_object *object,
struct fscache_cookie *cookie,
struct fscache_cache *cache)
{
atomic_inc(&cache->object_count);
object->state = FSCACHE_OBJECT_INIT;
spin_lock_init(&object->lock);
INIT_LIST_HEAD(&object->cache_link);
INIT_HLIST_NODE(&object->cookie_link);
vslow_work_init(&object->work, &fscache_object_slow_work_ops);
INIT_LIST_HEAD(&object->dependents);
INIT_LIST_HEAD(&object->dep_link);
INIT_LIST_HEAD(&object->pending_ops);
object->n_children = 0;
object->n_ops = object->n_in_progress = object->n_exclusive = 0;
object->events = object->event_mask = 0;
object->flags = 0;
object->store_limit = 0;
object->store_limit_l = 0;
object->cache = cache;
object->cookie = cookie;
object->parent = NULL;
}
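/*
* Example (illustrative sketch): a backend typically embeds struct
* fscache_object inside its own object record and calls fscache_object_init()
* from its alloc_object() operation.  The "mycache" structure and function
* below are hypothetical; backend-private state (such as a usage count) would
* follow the embedded member:
*
*    struct mycache_object {
*        struct fscache_object fscache;
*    };
*
*    static struct fscache_object *
*    mycache_alloc_object(struct fscache_cache *cache,
*                         struct fscache_cookie *cookie)
*    {
*        struct mycache_object *obj;
*
*        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
*        if (!obj)
*            return NULL;
*        fscache_object_init(&obj->fscache, cookie, cache);
*        return &obj->fscache;
*    }
*/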
extern void fscache_object_lookup_negative(struct fscache_object *object);
extern void fscache_obtained_object(struct fscache_object *object);
#ifdef CONFIG_FSCACHE_OBJECT_LIST
extern void fscache_object_destroy(struct fscache_object *object);
#else
#define fscache_object_destroy(object) do {} while(0)
#endif
/**
* fscache_object_destroyed - Note destruction of an object in a cache
* @cache: The cache from which the object came
*
* Note the destruction and deallocation of an object record in a cache.
*/
static inline void fscache_object_destroyed(struct fscache_cache *cache)
{
if (atomic_dec_and_test(&cache->object_count))
wake_up_all(&fscache_cache_cleared_wq);
}
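/*
* Example (illustrative sketch): a backend's put_object() operation typically
* drops a reference and, on releasing the last one, frees its object record
* and only then accounts for the destruction so that cache withdrawal can
* complete.  The "mycache" names and the usage counter are assumptions:
*
*    static void mycache_put_object(struct fscache_object *_object)
*    {
*        struct mycache_object *obj =
*            container_of(_object, struct mycache_object, fscache);
*        struct fscache_cache *cache = _object->cache;
*
*        if (atomic_dec_and_test(&obj->usage)) {
*            kfree(obj);
*            fscache_object_destroyed(cache);
*        }
*    }
*/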
/**
* fscache_object_lookup_error - Note an object encountered an error
* @object: The object on which the error was encountered
*
* Note that an object encountered a fatal error (usually an I/O error) and
* that it should be withdrawn as soon as possible.
*/
static inline void fscache_object_lookup_error(struct fscache_object *object)
{
set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events);
}
/**
* fscache_set_store_limit - Set the maximum size to be stored in an object
* @object: The object to set the maximum on
* @i_size: The limit to set in bytes
*
* Set the maximum size an object is permitted to reach, implying the highest
* byte that may be written. Intended to be called by the attr_changed() op.
*
* See Documentation/filesystems/caching/backend-api.txt for a complete
* description.
*/
static inline
void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
{
object->store_limit_l = i_size;
object->store_limit = i_size >> PAGE_SHIFT;
if (i_size & ~PAGE_MASK)
object->store_limit++;
}
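/*
* Example (illustrative sketch): an attr_changed() implementation might fetch
* the netfs's idea of the file size through the cookie definition's get_attr()
* hook (declared in linux/fscache.h) and feed it to fscache_set_store_limit().
* Note that a size that is not a multiple of PAGE_SIZE rounds the page limit
* up by one.  The "mycache" name and the lack of further backend work are
* assumptions:
*
*    static int mycache_attr_changed(struct fscache_object *object)
*    {
*        uint64_t ni_size;
*
*        object->cookie->def->get_attr(object->cookie->netfs_data, &ni_size);
*        fscache_set_store_limit(object, ni_size);
*        return 0;
*    }
*/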
/**
* fscache_end_io - End a retrieval operation on a page
* @op: The FS-Cache operation covering the retrieval
* @page: The page that was to be fetched
* @error: The error code (0 if successful)
*
* Note the end of an operation to retrieve a page, as covered by a particular
* operation record.
*/
static inline void fscache_end_io(struct fscache_retrieval *op,
struct page *page, int error)
{
op->end_io_func(page, op->context, error);
}
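/*
* Example (illustrative sketch): once a backend has filled a netfs page (for
* instance by copying from its backing page), it reports the result to the
* netfs through fscache_end_io() and then drops its reference on the retrieval
* record.  The copy step is an assumption about a particular backend's design:
*
*    copy_highpage(netfs_page, backing_page);
*    fscache_end_io(op, netfs_page, error);
*    fscache_put_retrieval(op);
*/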
/*
* out-of-line cache backend functions
*/
extern void fscache_init_cache(struct fscache_cache *cache,
const struct fscache_cache_ops *ops,
const char *idfmt,
...) __attribute__ ((format (printf, 3, 4)));
extern int fscache_add_cache(struct fscache_cache *cache,
struct fscache_object *fsdef,
const char *tagname);
extern void fscache_withdraw_cache(struct fscache_cache *cache);
extern void fscache_io_error(struct fscache_cache *cache);
extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
struct pagevec *pagevec);
extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
const void *data,
uint16_t datalen);
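/*
* Example (illustrative sketch): bringing a cache online generally means
* initialising the cache record against the backend's ops table, allocating
* and initialising an object to back the filesystem-default (fsdef) index and
* then adding the cache.  The "mycache" structure, ops table, fsdef helper and
* error handling are hypothetical; fscache_io_error() and
* fscache_withdraw_cache() are the declared paths for later failure and
* removal:
*
*    struct mycache_object *fsdef;
*    int ret;
*
*    fscache_init_cache(&mycache->cache, &mycache_ops,
*                       "mycache:%s", mycache->name);
*    fsdef = mycache_alloc_fsdef(mycache);
*    fscache_object_init(&fsdef->fscache, NULL, &mycache->cache);
*    ret = fscache_add_cache(&mycache->cache, &fsdef->fscache, tagname);
*    if (ret < 0)
*        goto error;
*/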
#endif /* _LINUX_FSCACHE_CACHE_H */