fs/fscache/page.c (GNU Linux-libre 4.9.337-gnu1)
1 /* Cache page management and data I/O routines
2  *
3  * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11
12 #define FSCACHE_DEBUG_LEVEL PAGE
13 #include <linux/module.h>
14 #include <linux/fscache-cache.h>
15 #include <linux/buffer_head.h>
16 #include <linux/pagevec.h>
17 #include <linux/slab.h>
18 #include "internal.h"
19
20 /*
21  * check to see if a page is being written to the cache
22  */
23 bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
24 {
25         void *val;
26
27         rcu_read_lock();
28         val = radix_tree_lookup(&cookie->stores, page->index);
29         rcu_read_unlock();
30
31         return val != NULL;
32 }
33 EXPORT_SYMBOL(__fscache_check_page_write);
34
35 /*
36  * wait for a page to finish being written to the cache
37  */
38 void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
39 {
40         wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
41
42         wait_event(*wq, !__fscache_check_page_write(cookie, page));
43 }
44 EXPORT_SYMBOL(__fscache_wait_on_page_write);
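/*
 * Illustrative sketch, not part of the fscache core: a netfs would normally
 * reach the function above through the fscache_wait_on_page_write() wrapper,
 * typically from its ->invalidatepage() or ->launder_page() op, so that the
 * cache has finished copying out of a PG_fscache page before the page goes
 * away.  mynetfs_i_cookie() is a hypothetical helper that returns the cookie
 * attached to the inode.
 *
 *	static void mynetfs_invalidatepage(struct page *page,
 *					   unsigned int offset,
 *					   unsigned int length)
 *	{
 *		struct fscache_cookie *cookie =
 *			mynetfs_i_cookie(page->mapping->host);
 *
 *		if (PageFsCache(page)) {
 *			fscache_wait_on_page_write(cookie, page);
 *			fscache_uncache_page(cookie, page);
 *		}
 *	}
 */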
45
46 /*
47  * wait for a page to finish being written to the cache. Put a timeout here
48  * since we might be called recursively via parent fs.
49  */
50 static
51 bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
52 {
53         wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
54
55         return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
56                                   HZ);
57 }
58
59 /*
60  * decide whether a page can be released, possibly by cancelling a store to it
61  * - we're allowed to sleep if __GFP_DIRECT_RECLAIM is flagged
62  */
63 bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
64                                   struct page *page,
65                                   gfp_t gfp)
66 {
67         struct page *xpage;
68         void *val;
69
70         _enter("%p,%p,%x", cookie, page, gfp);
71
72 try_again:
73         rcu_read_lock();
74         val = radix_tree_lookup(&cookie->stores, page->index);
75         if (!val) {
76                 rcu_read_unlock();
77                 fscache_stat(&fscache_n_store_vmscan_not_storing);
78                 __fscache_uncache_page(cookie, page);
79                 return true;
80         }
81
82         /* see if the page is actually undergoing storage - if so we can't get
83          * rid of it till the cache has finished with it */
84         if (radix_tree_tag_get(&cookie->stores, page->index,
85                                FSCACHE_COOKIE_STORING_TAG)) {
86                 rcu_read_unlock();
87                 goto page_busy;
88         }
89
90         /* the page is pending storage, so we attempt to cancel the store and
91          * discard the store request so that the page can be reclaimed */
92         spin_lock(&cookie->stores_lock);
93         rcu_read_unlock();
94
95         if (radix_tree_tag_get(&cookie->stores, page->index,
96                                FSCACHE_COOKIE_STORING_TAG)) {
97                 /* the page started to undergo storage whilst we were looking,
98                  * so now we can only wait or return */
99                 spin_unlock(&cookie->stores_lock);
100                 goto page_busy;
101         }
102
103         xpage = radix_tree_delete(&cookie->stores, page->index);
104         spin_unlock(&cookie->stores_lock);
105
106         if (xpage) {
107                 fscache_stat(&fscache_n_store_vmscan_cancelled);
108                 fscache_stat(&fscache_n_store_radix_deletes);
109                 ASSERTCMP(xpage, ==, page);
110         } else {
111                 fscache_stat(&fscache_n_store_vmscan_gone);
112         }
113
114         wake_up_bit(&cookie->flags, 0);
115         if (xpage)
116                 put_page(xpage);
117         __fscache_uncache_page(cookie, page);
118         return true;
119
120 page_busy:
121         /* We will wait here if we're allowed to, but that could deadlock the
122          * allocator as the work threads writing to the cache may all end up
123          * sleeping on memory allocation, so we may need to impose a timeout
124          * too. */
125         if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) {
126                 fscache_stat(&fscache_n_store_vmscan_busy);
127                 return false;
128         }
129
130         fscache_stat(&fscache_n_store_vmscan_wait);
131         if (!release_page_wait_timeout(cookie, page))
132                 _debug("fscache writeout timeout page: %p{%lx}",
133                         page, page->index);
134
135         gfp &= ~__GFP_DIRECT_RECLAIM;
136         goto try_again;
137 }
138 EXPORT_SYMBOL(__fscache_maybe_release_page);
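/*
 * Illustrative sketch of the usual caller: a netfs ->releasepage() op goes
 * through the fscache_maybe_release_page() wrapper and refuses to give the
 * page up while the cache still needs it.  mynetfs_i_cookie() is a
 * hypothetical helper; everything else is the normal address_space op
 * signature.
 *
 *	static int mynetfs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		struct fscache_cookie *cookie =
 *			mynetfs_i_cookie(page->mapping->host);
 *
 *		if (PageFsCache(page) &&
 *		    !fscache_maybe_release_page(cookie, page, gfp))
 *			return 0;
 *
 *		return 1;
 *	}
 */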
139
140 /*
141  * note that a page has finished being written to the cache
142  */
143 static void fscache_end_page_write(struct fscache_object *object,
144                                    struct page *page)
145 {
146         struct fscache_cookie *cookie;
147         struct page *xpage = NULL;
148
149         spin_lock(&object->lock);
150         cookie = object->cookie;
151         if (cookie) {
152                 /* delete the page from the tree if it is now no longer
153                  * pending */
154                 spin_lock(&cookie->stores_lock);
155                 radix_tree_tag_clear(&cookie->stores, page->index,
156                                      FSCACHE_COOKIE_STORING_TAG);
157                 if (!radix_tree_tag_get(&cookie->stores, page->index,
158                                         FSCACHE_COOKIE_PENDING_TAG)) {
159                         fscache_stat(&fscache_n_store_radix_deletes);
160                         xpage = radix_tree_delete(&cookie->stores, page->index);
161                 }
162                 spin_unlock(&cookie->stores_lock);
163                 wake_up_bit(&cookie->flags, 0);
164         }
165         spin_unlock(&object->lock);
166         if (xpage)
167                 put_page(xpage);
168 }
169
170 /*
171  * actually apply the changed attributes to a cache object
172  */
173 static void fscache_attr_changed_op(struct fscache_operation *op)
174 {
175         struct fscache_object *object = op->object;
176         int ret;
177
178         _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
179
180         fscache_stat(&fscache_n_attr_changed_calls);
181
182         if (fscache_object_is_active(object)) {
183                 fscache_stat(&fscache_n_cop_attr_changed);
184                 ret = object->cache->ops->attr_changed(object);
185                 fscache_stat_d(&fscache_n_cop_attr_changed);
186                 if (ret < 0)
187                         fscache_abort_object(object);
188         }
189
190         fscache_op_complete(op, true);
191         _leave("");
192 }
193
194 /*
195  * notification that the attributes on an object have changed
196  */
197 int __fscache_attr_changed(struct fscache_cookie *cookie)
198 {
199         struct fscache_operation *op;
200         struct fscache_object *object;
201         bool wake_cookie = false;
202
203         _enter("%p", cookie);
204
205         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
206
207         fscache_stat(&fscache_n_attr_changed);
208
209         op = kzalloc(sizeof(*op), GFP_KERNEL);
210         if (!op) {
211                 fscache_stat(&fscache_n_attr_changed_nomem);
212                 _leave(" = -ENOMEM");
213                 return -ENOMEM;
214         }
215
216         fscache_operation_init(op, fscache_attr_changed_op, NULL, NULL);
217         op->flags = FSCACHE_OP_ASYNC |
218                 (1 << FSCACHE_OP_EXCLUSIVE) |
219                 (1 << FSCACHE_OP_UNUSE_COOKIE);
220
221         spin_lock(&cookie->lock);
222
223         if (!fscache_cookie_enabled(cookie) ||
224             hlist_empty(&cookie->backing_objects))
225                 goto nobufs;
226         object = hlist_entry(cookie->backing_objects.first,
227                              struct fscache_object, cookie_link);
228
229         __fscache_use_cookie(cookie);
230         if (fscache_submit_exclusive_op(object, op) < 0)
231                 goto nobufs_dec;
232         spin_unlock(&cookie->lock);
233         fscache_stat(&fscache_n_attr_changed_ok);
234         fscache_put_operation(op);
235         _leave(" = 0");
236         return 0;
237
238 nobufs_dec:
239         wake_cookie = __fscache_unuse_cookie(cookie);
240 nobufs:
241         spin_unlock(&cookie->lock);
242         fscache_put_operation(op);
243         if (wake_cookie)
244                 __fscache_wake_unused_cookie(cookie);
245         fscache_stat(&fscache_n_attr_changed_nobufs);
246         _leave(" = %d", -ENOBUFS);
247         return -ENOBUFS;
248 }
249 EXPORT_SYMBOL(__fscache_attr_changed);
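/*
 * Illustrative sketch: a netfs would normally call the fscache_attr_changed()
 * wrapper once an attribute change (most importantly a size change) has been
 * committed, so that the cache can resize or reserve space for the backing
 * object.  mynetfs_i_cookie() is a hypothetical helper.
 *
 *	static void mynetfs_setattr_update_cache(struct inode *inode)
 *	{
 *		fscache_attr_changed(mynetfs_i_cookie(inode));
 *	}
 */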
250
251 /*
252  * Handle cancellation of a pending retrieval op
253  */
254 static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
255 {
256         struct fscache_retrieval *op =
257                 container_of(_op, struct fscache_retrieval, op);
258
259         atomic_set(&op->n_pages, 0);
260 }
261
262 /*
263  * release a retrieval op reference
264  */
265 static void fscache_release_retrieval_op(struct fscache_operation *_op)
266 {
267         struct fscache_retrieval *op =
268                 container_of(_op, struct fscache_retrieval, op);
269
270         _enter("{OP%x}", op->op.debug_id);
271
272         ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
273                     atomic_read(&op->n_pages), ==, 0);
274
275         fscache_hist(fscache_retrieval_histogram, op->start_time);
276         if (op->context)
277                 fscache_put_context(op->cookie, op->context);
278
279         _leave("");
280 }
281
282 /*
283  * allocate a retrieval op
284  */
285 static struct fscache_retrieval *fscache_alloc_retrieval(
286         struct fscache_cookie *cookie,
287         struct address_space *mapping,
288         fscache_rw_complete_t end_io_func,
289         void *context)
290 {
291         struct fscache_retrieval *op;
292
293         /* allocate a retrieval operation and attempt to submit it */
294         op = kzalloc(sizeof(*op), GFP_NOIO);
295         if (!op) {
296                 fscache_stat(&fscache_n_retrievals_nomem);
297                 return NULL;
298         }
299
300         fscache_operation_init(&op->op, NULL,
301                                fscache_do_cancel_retrieval,
302                                fscache_release_retrieval_op);
303         op->op.flags    = FSCACHE_OP_MYTHREAD |
304                 (1UL << FSCACHE_OP_WAITING) |
305                 (1UL << FSCACHE_OP_UNUSE_COOKIE);
306         op->cookie      = cookie;
307         op->mapping     = mapping;
308         op->end_io_func = end_io_func;
309         op->context     = context;
310         op->start_time  = jiffies;
311         INIT_LIST_HEAD(&op->to_do);
312
313         /* Pin the netfs read context in case we need to do the actual netfs
314          * read because we've encountered a cache read failure.
315          */
316         if (context)
317                 fscache_get_context(op->cookie, context);
318         return op;
319 }
320
321 /*
322  * wait for a deferred lookup to complete
323  */
324 int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
325 {
326         unsigned long jif;
327
328         _enter("");
329
330         if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
331                 _leave(" = 0 [imm]");
332                 return 0;
333         }
334
335         fscache_stat(&fscache_n_retrievals_wait);
336
337         jif = jiffies;
338         if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
339                         TASK_INTERRUPTIBLE) != 0) {
340                 fscache_stat(&fscache_n_retrievals_intr);
341                 _leave(" = -ERESTARTSYS");
342                 return -ERESTARTSYS;
343         }
344
345         ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));
346
347         smp_rmb();
348         fscache_hist(fscache_retrieval_delay_histogram, jif);
349         _leave(" = 0 [dly]");
350         return 0;
351 }
352
353 /*
354  * wait for an object to become active (or dead)
355  */
356 int fscache_wait_for_operation_activation(struct fscache_object *object,
357                                           struct fscache_operation *op,
358                                           atomic_t *stat_op_waits,
359                                           atomic_t *stat_object_dead)
360 {
361         int ret;
362
363         if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
364                 goto check_if_dead;
365
366         _debug(">>> WT");
367         if (stat_op_waits)
368                 fscache_stat(stat_op_waits);
369         if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
370                         TASK_INTERRUPTIBLE) != 0) {
371                 ret = fscache_cancel_op(op, false);
372                 if (ret == 0)
373                         return -ERESTARTSYS;
374
375                 /* it's been removed from the pending queue by another party,
376                  * so we should get to run shortly */
377                 wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
378                             TASK_UNINTERRUPTIBLE);
379         }
380         _debug("<<< GO");
381
382 check_if_dead:
383         if (op->state == FSCACHE_OP_ST_CANCELLED) {
384                 if (stat_object_dead)
385                         fscache_stat(stat_object_dead);
386                 _leave(" = -ENOBUFS [cancelled]");
387                 return -ENOBUFS;
388         }
389         if (unlikely(fscache_object_is_dying(object) ||
390                      fscache_cache_is_broken(object))) {
391                 enum fscache_operation_state state = op->state;
392                 fscache_cancel_op(op, true);
393                 if (stat_object_dead)
394                         fscache_stat(stat_object_dead);
395                 _leave(" = -ENOBUFS [obj dead %d]", state);
396                 return -ENOBUFS;
397         }
398         return 0;
399 }
400
401 /*
402  * read a page from the cache or allocate a block in which to store it
403  * - we return:
404  *   -ENOMEM    - out of memory, nothing done
405  *   -ERESTARTSYS - interrupted
406  *   -ENOBUFS   - no backing object available in which to cache the block
407  *   -ENODATA   - no data available in the backing object for this block
408  *   0          - dispatched a read - it'll call end_io_func() when finished
409  */
410 int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
411                                  struct page *page,
412                                  fscache_rw_complete_t end_io_func,
413                                  void *context,
414                                  gfp_t gfp)
415 {
416         struct fscache_retrieval *op;
417         struct fscache_object *object;
418         bool wake_cookie = false;
419         int ret;
420
421         _enter("%p,%p,,,", cookie, page);
422
423         fscache_stat(&fscache_n_retrievals);
424
425         if (hlist_empty(&cookie->backing_objects))
426                 goto nobufs;
427
428         if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
429                 _leave(" = -ENOBUFS [invalidating]");
430                 return -ENOBUFS;
431         }
432
433         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
434         ASSERTCMP(page, !=, NULL);
435
436         if (fscache_wait_for_deferred_lookup(cookie) < 0)
437                 return -ERESTARTSYS;
438
439         op = fscache_alloc_retrieval(cookie, page->mapping,
440                                      end_io_func, context);
441         if (!op) {
442                 _leave(" = -ENOMEM");
443                 return -ENOMEM;
444         }
445         atomic_set(&op->n_pages, 1);
446
447         spin_lock(&cookie->lock);
448
449         if (!fscache_cookie_enabled(cookie) ||
450             hlist_empty(&cookie->backing_objects))
451                 goto nobufs_unlock;
452         object = hlist_entry(cookie->backing_objects.first,
453                              struct fscache_object, cookie_link);
454
455         ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));
456
457         __fscache_use_cookie(cookie);
458         atomic_inc(&object->n_reads);
459         __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
460
461         if (fscache_submit_op(object, &op->op) < 0)
462                 goto nobufs_unlock_dec;
463         spin_unlock(&cookie->lock);
464
465         fscache_stat(&fscache_n_retrieval_ops);
466
467         /* we wait for the operation to become active, and then process it
468          * *here*, in this thread, and not in the thread pool */
469         ret = fscache_wait_for_operation_activation(
470                 object, &op->op,
471                 __fscache_stat(&fscache_n_retrieval_op_waits),
472                 __fscache_stat(&fscache_n_retrievals_object_dead));
473         if (ret < 0)
474                 goto error;
475
476         /* ask the cache to honour the operation */
477         if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
478                 fscache_stat(&fscache_n_cop_allocate_page);
479                 ret = object->cache->ops->allocate_page(op, page, gfp);
480                 fscache_stat_d(&fscache_n_cop_allocate_page);
481                 if (ret == 0)
482                         ret = -ENODATA;
483         } else {
484                 fscache_stat(&fscache_n_cop_read_or_alloc_page);
485                 ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
486                 fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
487         }
488
489 error:
490         if (ret == -ENOMEM)
491                 fscache_stat(&fscache_n_retrievals_nomem);
492         else if (ret == -ERESTARTSYS)
493                 fscache_stat(&fscache_n_retrievals_intr);
494         else if (ret == -ENODATA)
495                 fscache_stat(&fscache_n_retrievals_nodata);
496         else if (ret < 0)
497                 fscache_stat(&fscache_n_retrievals_nobufs);
498         else
499                 fscache_stat(&fscache_n_retrievals_ok);
500
501         fscache_put_retrieval(op);
502         _leave(" = %d", ret);
503         return ret;
504
505 nobufs_unlock_dec:
506         atomic_dec(&object->n_reads);
507         wake_cookie = __fscache_unuse_cookie(cookie);
508 nobufs_unlock:
509         spin_unlock(&cookie->lock);
510         if (wake_cookie)
511                 __fscache_wake_unused_cookie(cookie);
512         fscache_put_retrieval(op);
513 nobufs:
514         fscache_stat(&fscache_n_retrievals_nobufs);
515         _leave(" = -ENOBUFS");
516         return -ENOBUFS;
517 }
518 EXPORT_SYMBOL(__fscache_read_or_alloc_page);
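/*
 * Illustrative sketch, assuming a hypothetical netfs: ->readpage() tries the
 * cache first via the fscache_read_or_alloc_page() wrapper.  A return of 0
 * means the read was dispatched and the completion callback will see the
 * page; -ENODATA and -ENOBUFS mean the data has to come from the server
 * instead.  mynetfs_i_cookie(), mynetfs_read_done() and
 * mynetfs_readpage_from_server() are hypothetical.
 *
 *	static int mynetfs_readpage(struct file *file, struct page *page)
 *	{
 *		struct inode *inode = page->mapping->host;
 *		int ret;
 *
 *		ret = fscache_read_or_alloc_page(mynetfs_i_cookie(inode), page,
 *						 mynetfs_read_done, NULL,
 *						 GFP_KERNEL);
 *		if (ret == 0)
 *			return 0;
 *
 *		return mynetfs_readpage_from_server(file, page);
 *	}
 */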
519
520 /*
521  * read a list of pages from the cache or allocate blocks in which to store
522  * them
523  * - we return:
524  *   -ENOMEM    - out of memory, some pages may be being read
525  *   -ERESTARTSYS - interrupted, some pages may be being read
526  *   -ENOBUFS   - no backing object or space available in which to cache any
527  *                pages not being read
528  *   -ENODATA   - no data available in the backing object for some or all of
529  *                the pages
530  *   0          - dispatched a read on all pages
531  *
532  * end_io_func() will be called for each page read from the cache as it
533  * finishes being read
534  *
535  * any pages for which a read is dispatched will be removed from pages and
536  * nr_pages
537  */
538 int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
539                                   struct address_space *mapping,
540                                   struct list_head *pages,
541                                   unsigned *nr_pages,
542                                   fscache_rw_complete_t end_io_func,
543                                   void *context,
544                                   gfp_t gfp)
545 {
546         struct fscache_retrieval *op;
547         struct fscache_object *object;
548         bool wake_cookie = false;
549         int ret;
550
551         _enter("%p,,%d,,,", cookie, *nr_pages);
552
553         fscache_stat(&fscache_n_retrievals);
554
555         if (hlist_empty(&cookie->backing_objects))
556                 goto nobufs;
557
558         if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
559                 _leave(" = -ENOBUFS [invalidating]");
560                 return -ENOBUFS;
561         }
562
563         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
564         ASSERTCMP(*nr_pages, >, 0);
565         ASSERT(!list_empty(pages));
566
567         if (fscache_wait_for_deferred_lookup(cookie) < 0)
568                 return -ERESTARTSYS;
569
570         op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
571         if (!op)
572                 return -ENOMEM;
573         atomic_set(&op->n_pages, *nr_pages);
574
575         spin_lock(&cookie->lock);
576
577         if (!fscache_cookie_enabled(cookie) ||
578             hlist_empty(&cookie->backing_objects))
579                 goto nobufs_unlock;
580         object = hlist_entry(cookie->backing_objects.first,
581                              struct fscache_object, cookie_link);
582
583         __fscache_use_cookie(cookie);
584         atomic_inc(&object->n_reads);
585         __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
586
587         if (fscache_submit_op(object, &op->op) < 0)
588                 goto nobufs_unlock_dec;
589         spin_unlock(&cookie->lock);
590
591         fscache_stat(&fscache_n_retrieval_ops);
592
593         /* we wait for the operation to become active, and then process it
594          * *here*, in this thread, and not in the thread pool */
595         ret = fscache_wait_for_operation_activation(
596                 object, &op->op,
597                 __fscache_stat(&fscache_n_retrieval_op_waits),
598                 __fscache_stat(&fscache_n_retrievals_object_dead));
599         if (ret < 0)
600                 goto error;
601
602         /* ask the cache to honour the operation */
603         if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
604                 fscache_stat(&fscache_n_cop_allocate_pages);
605                 ret = object->cache->ops->allocate_pages(
606                         op, pages, nr_pages, gfp);
607                 fscache_stat_d(&fscache_n_cop_allocate_pages);
608         } else {
609                 fscache_stat(&fscache_n_cop_read_or_alloc_pages);
610                 ret = object->cache->ops->read_or_alloc_pages(
611                         op, pages, nr_pages, gfp);
612                 fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
613         }
614
615 error:
616         if (ret == -ENOMEM)
617                 fscache_stat(&fscache_n_retrievals_nomem);
618         else if (ret == -ERESTARTSYS)
619                 fscache_stat(&fscache_n_retrievals_intr);
620         else if (ret == -ENODATA)
621                 fscache_stat(&fscache_n_retrievals_nodata);
622         else if (ret < 0)
623                 fscache_stat(&fscache_n_retrievals_nobufs);
624         else
625                 fscache_stat(&fscache_n_retrievals_ok);
626
627         fscache_put_retrieval(op);
628         _leave(" = %d", ret);
629         return ret;
630
631 nobufs_unlock_dec:
632         atomic_dec(&object->n_reads);
633         wake_cookie = __fscache_unuse_cookie(cookie);
634 nobufs_unlock:
635         spin_unlock(&cookie->lock);
636         fscache_put_retrieval(op);
637         if (wake_cookie)
638                 __fscache_wake_unused_cookie(cookie);
639 nobufs:
640         fscache_stat(&fscache_n_retrievals_nobufs);
641         _leave(" = -ENOBUFS");
642         return -ENOBUFS;
643 }
644 EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
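/*
 * Illustrative fragment from a hypothetical netfs ->readpages() op: pages the
 * cache takes are removed from the list, whatever remains is fetched from the
 * server, and fscache_readpages_cancel() clears the PG_fscache mark on pages
 * that will not be read after all.  The mynetfs_* names are assumptions.
 *
 *	ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
 *					  mynetfs_read_done, NULL, GFP_KERNEL);
 *	if (ret == 0)
 *		return 0;
 *
 *	ret = mynetfs_readpages_from_server(file, mapping, pages, nr_pages);
 *	if (ret < 0)
 *		fscache_readpages_cancel(cookie, pages);
 *	return ret;
 */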
645
646 /*
647  * allocate a block in the cache on which to store a page
648  * - we return:
649  *   -ENOMEM    - out of memory, nothing done
650  *   -ERESTARTSYS - interrupted
651  *   -ENOBUFS   - no backing object available in which to cache the block
652  *   0          - block allocated
653  */
654 int __fscache_alloc_page(struct fscache_cookie *cookie,
655                          struct page *page,
656                          gfp_t gfp)
657 {
658         struct fscache_retrieval *op;
659         struct fscache_object *object;
660         bool wake_cookie = false;
661         int ret;
662
663         _enter("%p,%p,,,", cookie, page);
664
665         fscache_stat(&fscache_n_allocs);
666
667         if (hlist_empty(&cookie->backing_objects))
668                 goto nobufs;
669
670         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
671         ASSERTCMP(page, !=, NULL);
672
673         if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
674                 _leave(" = -ENOBUFS [invalidating]");
675                 return -ENOBUFS;
676         }
677
678         if (fscache_wait_for_deferred_lookup(cookie) < 0)
679                 return -ERESTARTSYS;
680
681         op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
682         if (!op)
683                 return -ENOMEM;
684         atomic_set(&op->n_pages, 1);
685
686         spin_lock(&cookie->lock);
687
688         if (!fscache_cookie_enabled(cookie) ||
689             hlist_empty(&cookie->backing_objects))
690                 goto nobufs_unlock;
691         object = hlist_entry(cookie->backing_objects.first,
692                              struct fscache_object, cookie_link);
693
694         __fscache_use_cookie(cookie);
695         if (fscache_submit_op(object, &op->op) < 0)
696                 goto nobufs_unlock_dec;
697         spin_unlock(&cookie->lock);
698
699         fscache_stat(&fscache_n_alloc_ops);
700
701         ret = fscache_wait_for_operation_activation(
702                 object, &op->op,
703                 __fscache_stat(&fscache_n_alloc_op_waits),
704                 __fscache_stat(&fscache_n_allocs_object_dead));
705         if (ret < 0)
706                 goto error;
707
708         /* ask the cache to honour the operation */
709         fscache_stat(&fscache_n_cop_allocate_page);
710         ret = object->cache->ops->allocate_page(op, page, gfp);
711         fscache_stat_d(&fscache_n_cop_allocate_page);
712
713 error:
714         if (ret == -ERESTARTSYS)
715                 fscache_stat(&fscache_n_allocs_intr);
716         else if (ret < 0)
717                 fscache_stat(&fscache_n_allocs_nobufs);
718         else
719                 fscache_stat(&fscache_n_allocs_ok);
720
721         fscache_put_retrieval(op);
722         _leave(" = %d", ret);
723         return ret;
724
725 nobufs_unlock_dec:
726         wake_cookie = __fscache_unuse_cookie(cookie);
727 nobufs_unlock:
728         spin_unlock(&cookie->lock);
729         fscache_put_retrieval(op);
730         if (wake_cookie)
731                 __fscache_wake_unused_cookie(cookie);
732 nobufs:
733         fscache_stat(&fscache_n_allocs_nobufs);
734         _leave(" = -ENOBUFS");
735         return -ENOBUFS;
736 }
737 EXPORT_SYMBOL(__fscache_alloc_page);
738
739 /*
740  * Unmark pages allocated in the readahead code path (via
741  * fscache_read_or_alloc_pages()) after delegating to the base filesystem
742  */
743 void __fscache_readpages_cancel(struct fscache_cookie *cookie,
744                                 struct list_head *pages)
745 {
746         struct page *page;
747
748         list_for_each_entry(page, pages, lru) {
749                 if (PageFsCache(page))
750                         __fscache_uncache_page(cookie, page);
751         }
752 }
753 EXPORT_SYMBOL(__fscache_readpages_cancel);
754
755 /*
756  * release a write op reference
757  */
758 static void fscache_release_write_op(struct fscache_operation *_op)
759 {
760         _enter("{OP%x}", _op->debug_id);
761 }
762
763 /*
764  * perform the background storage of a page into the cache
765  */
766 static void fscache_write_op(struct fscache_operation *_op)
767 {
768         struct fscache_storage *op =
769                 container_of(_op, struct fscache_storage, op);
770         struct fscache_object *object = op->op.object;
771         struct fscache_cookie *cookie;
772         struct page *page;
773         unsigned n;
774         void *results[1];
775         int ret;
776
777         _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
778
779 again:
780         spin_lock(&object->lock);
781         cookie = object->cookie;
782
783         if (!fscache_object_is_active(object)) {
784                 /* If we get here, then the on-disk cache object likely no
785                  * longer exists, so we should just cancel this write operation.
786                  */
787                 spin_unlock(&object->lock);
788                 fscache_op_complete(&op->op, false);
789                 _leave(" [inactive]");
790                 return;
791         }
792
793         if (!cookie) {
794                 /* If we get here, then the cookie belonging to the object was
795                  * detached, probably by the cookie being withdrawn due to
796                  * memory pressure, which means that the pages from which we
797                  * might write to the cache no longer exist - therefore, we can just
798                  * cancel this write operation.
799                  */
800                 spin_unlock(&object->lock);
801                 fscache_op_complete(&op->op, false);
802                 _leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
803                        _op->flags, _op->state, object->state->short_name,
804                        object->flags);
805                 return;
806         }
807
808         spin_lock(&cookie->stores_lock);
809
810         fscache_stat(&fscache_n_store_calls);
811
812         /* find a page to store */
813         page = NULL;
814         n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
815                                        FSCACHE_COOKIE_PENDING_TAG);
816         if (n != 1)
817                 goto superseded;
818         page = results[0];
819         _debug("gang %d [%lx]", n, page->index);
820
821         radix_tree_tag_set(&cookie->stores, page->index,
822                            FSCACHE_COOKIE_STORING_TAG);
823         radix_tree_tag_clear(&cookie->stores, page->index,
824                              FSCACHE_COOKIE_PENDING_TAG);
825
826         spin_unlock(&cookie->stores_lock);
827         spin_unlock(&object->lock);
828
829         if (page->index >= op->store_limit)
830                 goto discard_page;
831
832         fscache_stat(&fscache_n_store_pages);
833         fscache_stat(&fscache_n_cop_write_page);
834         ret = object->cache->ops->write_page(op, page);
835         fscache_stat_d(&fscache_n_cop_write_page);
836         fscache_end_page_write(object, page);
837         if (ret < 0) {
838                 fscache_abort_object(object);
839                 fscache_op_complete(&op->op, true);
840         } else {
841                 fscache_enqueue_operation(&op->op);
842         }
843
844         _leave("");
845         return;
846
847 discard_page:
848         fscache_stat(&fscache_n_store_pages_over_limit);
849         fscache_end_page_write(object, page);
850         goto again;
851
852 superseded:
853         /* this writer is going away and there aren't any more things to
854          * write */
855         _debug("cease");
856         spin_unlock(&cookie->stores_lock);
857         clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
858         spin_unlock(&object->lock);
859         fscache_op_complete(&op->op, true);
860         _leave("");
861 }
862
863 /*
864  * Clear the pages pending writing for invalidation
865  */
866 void fscache_invalidate_writes(struct fscache_cookie *cookie)
867 {
868         struct page *page;
869         void *results[16];
870         int n, i;
871
872         _enter("");
873
874         for (;;) {
875                 spin_lock(&cookie->stores_lock);
876                 n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
877                                                ARRAY_SIZE(results),
878                                                FSCACHE_COOKIE_PENDING_TAG);
879                 if (n == 0) {
880                         spin_unlock(&cookie->stores_lock);
881                         break;
882                 }
883
884                 for (i = n - 1; i >= 0; i--) {
885                         page = results[i];
886                         radix_tree_delete(&cookie->stores, page->index);
887                 }
888
889                 spin_unlock(&cookie->stores_lock);
890
891                 for (i = n - 1; i >= 0; i--)
892                         put_page(results[i]);
893         }
894
895         wake_up_bit(&cookie->flags, 0);
896
897         _leave("");
898 }
899
900 /*
901  * request a page be stored in the cache
902  * - returns:
903  *   -ENOMEM    - out of memory, nothing done
904  *   -ENOBUFS   - no backing object available in which to cache the page
905  *   0          - dispatched a write - it'll call end_io_func() when finished
906  *
907  * if the cookie still has a backing object at this point, that object can be
908  * in one of a few states with respect to storage processing:
909  *
910  *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
911  *      set)
912  *
913  *      (a) no writes yet
914  *
915  *      (b) writes deferred till post-creation (mark page for writing and
916  *          return immediately)
917  *
918  *  (2) negative lookup, object created, initial fill being made from netfs
919  *
920  *      (a) fill point not yet reached this page (mark page for writing and
921  *          return)
922  *
923  *      (b) fill point passed this page (queue op to store this page)
924  *
925  *  (3) object extant (queue op to store this page)
926  *
927  * any other state is invalid
928  */
929 int __fscache_write_page(struct fscache_cookie *cookie,
930                          struct page *page,
931                          gfp_t gfp)
932 {
933         struct fscache_storage *op;
934         struct fscache_object *object;
935         bool wake_cookie = false;
936         int ret;
937
938         _enter("%p,%x,", cookie, (u32) page->flags);
939
940         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
941         ASSERT(PageFsCache(page));
942
943         fscache_stat(&fscache_n_stores);
944
945         if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
946                 _leave(" = -ENOBUFS [invalidating]");
947                 return -ENOBUFS;
948         }
949
950         op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
951         if (!op)
952                 goto nomem;
953
954         fscache_operation_init(&op->op, fscache_write_op, NULL,
955                                fscache_release_write_op);
956         op->op.flags = FSCACHE_OP_ASYNC |
957                 (1 << FSCACHE_OP_WAITING) |
958                 (1 << FSCACHE_OP_UNUSE_COOKIE);
959
960         ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
961         if (ret < 0)
962                 goto nomem_free;
963
964         ret = -ENOBUFS;
965         spin_lock(&cookie->lock);
966
967         if (!fscache_cookie_enabled(cookie) ||
968             hlist_empty(&cookie->backing_objects))
969                 goto nobufs;
970         object = hlist_entry(cookie->backing_objects.first,
971                              struct fscache_object, cookie_link);
972         if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
973                 goto nobufs;
974
975         /* add the page to the pending-storage radix tree on the backing
976          * object */
977         spin_lock(&object->lock);
978         spin_lock(&cookie->stores_lock);
979
980         _debug("store limit %llx", (unsigned long long) object->store_limit);
981
982         ret = radix_tree_insert(&cookie->stores, page->index, page);
983         if (ret < 0) {
984                 if (ret == -EEXIST)
985                         goto already_queued;
986                 _debug("insert failed %d", ret);
987                 goto nobufs_unlock_obj;
988         }
989
990         radix_tree_tag_set(&cookie->stores, page->index,
991                            FSCACHE_COOKIE_PENDING_TAG);
992         get_page(page);
993
994         /* we only want one writer at a time, but we do need to queue new
995          * writers after exclusive ops */
996         if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
997                 goto already_pending;
998
999         spin_unlock(&cookie->stores_lock);
1000         spin_unlock(&object->lock);
1001
1002         op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
1003         op->store_limit = object->store_limit;
1004
1005         __fscache_use_cookie(cookie);
1006         if (fscache_submit_op(object, &op->op) < 0)
1007                 goto submit_failed;
1008
1009         spin_unlock(&cookie->lock);
1010         radix_tree_preload_end();
1011         fscache_stat(&fscache_n_store_ops);
1012         fscache_stat(&fscache_n_stores_ok);
1013
1014         /* the work queue now carries its own ref on the object */
1015         fscache_put_operation(&op->op);
1016         _leave(" = 0");
1017         return 0;
1018
1019 already_queued:
1020         fscache_stat(&fscache_n_stores_again);
1021 already_pending:
1022         spin_unlock(&cookie->stores_lock);
1023         spin_unlock(&object->lock);
1024         spin_unlock(&cookie->lock);
1025         radix_tree_preload_end();
1026         fscache_put_operation(&op->op);
1027         fscache_stat(&fscache_n_stores_ok);
1028         _leave(" = 0");
1029         return 0;
1030
1031 submit_failed:
1032         spin_lock(&cookie->stores_lock);
1033         radix_tree_delete(&cookie->stores, page->index);
1034         spin_unlock(&cookie->stores_lock);
1035         wake_cookie = __fscache_unuse_cookie(cookie);
1036         put_page(page);
1037         ret = -ENOBUFS;
1038         goto nobufs;
1039
1040 nobufs_unlock_obj:
1041         spin_unlock(&cookie->stores_lock);
1042         spin_unlock(&object->lock);
1043 nobufs:
1044         spin_unlock(&cookie->lock);
1045         radix_tree_preload_end();
1046         fscache_put_operation(&op->op);
1047         if (wake_cookie)
1048                 __fscache_wake_unused_cookie(cookie);
1049         fscache_stat(&fscache_n_stores_nobufs);
1050         _leave(" = -ENOBUFS");
1051         return -ENOBUFS;
1052
1053 nomem_free:
1054         fscache_put_operation(&op->op);
1055 nomem:
1056         fscache_stat(&fscache_n_stores_oom);
1057         _leave(" = -ENOMEM");
1058         return -ENOMEM;
1059 }
1060 EXPORT_SYMBOL(__fscache_write_page);
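/*
 * Illustrative sketch: once a netfs holds a page's data (for instance after
 * reading it from the server), it can copy it into the cache through the
 * fscache_write_page() wrapper.  The page must already carry the PG_fscache
 * mark set during the earlier read/alloc, and if the write cannot be queued
 * the page should be uncached so a stale mark does not linger.
 * mynetfs_i_cookie() is a hypothetical helper.
 *
 *	static void mynetfs_write_to_cache(struct inode *inode,
 *					   struct page *page)
 *	{
 *		struct fscache_cookie *cookie = mynetfs_i_cookie(inode);
 *
 *		if (fscache_write_page(cookie, page, GFP_KERNEL) != 0)
 *			fscache_uncache_page(cookie, page);
 *	}
 */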
1061
1062 /*
1063  * remove a page from the cache
1064  */
1065 void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
1066 {
1067         struct fscache_object *object;
1068
1069         _enter(",%p", page);
1070
1071         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
1072         ASSERTCMP(page, !=, NULL);
1073
1074         fscache_stat(&fscache_n_uncaches);
1075
1076         /* cache withdrawal may beat us to it */
1077         if (!PageFsCache(page))
1078                 goto done;
1079
1080         /* get the object */
1081         spin_lock(&cookie->lock);
1082
1083         if (hlist_empty(&cookie->backing_objects)) {
1084                 ClearPageFsCache(page);
1085                 goto done_unlock;
1086         }
1087
1088         object = hlist_entry(cookie->backing_objects.first,
1089                              struct fscache_object, cookie_link);
1090
1091         /* there might now be stuff on disk we could read */
1092         clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
1093
1094         /* only invoke the cache backend if we managed to mark the page
1095          * uncached here; this deals with synchronisation vs withdrawal */
1096         if (TestClearPageFsCache(page) &&
1097             object->cache->ops->uncache_page) {
1098                 /* the cache backend releases the cookie lock */
1099                 fscache_stat(&fscache_n_cop_uncache_page);
1100                 object->cache->ops->uncache_page(object, page);
1101                 fscache_stat_d(&fscache_n_cop_uncache_page);
1102                 goto done;
1103         }
1104
1105 done_unlock:
1106         spin_unlock(&cookie->lock);
1107 done:
1108         _leave("");
1109 }
1110 EXPORT_SYMBOL(__fscache_uncache_page);
1111
1112 /**
1113  * fscache_mark_page_cached - Mark a page as being cached
1114  * @op: The retrieval op pages are being marked for
1115  * @page: The page to be marked
1116  *
1117  * Mark a netfs page as being cached.  After this is called, the netfs
1118  * must call fscache_uncache_page() to remove the mark.
1119  */
1120 void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
1121 {
1122         struct fscache_cookie *cookie = op->op.object->cookie;
1123
1124 #ifdef CONFIG_FSCACHE_STATS
1125         atomic_inc(&fscache_n_marks);
1126 #endif
1127
1128         _debug("- mark %p{%lx}", page, page->index);
1129         if (TestSetPageFsCache(page)) {
1130                 static bool once_only;
1131                 if (!once_only) {
1132                         once_only = true;
1133                         pr_warn("Cookie type %s marked page %lx multiple times\n",
1134                                 cookie->def->name, page->index);
1135                 }
1136         }
1137
1138         if (cookie->def->mark_page_cached)
1139                 cookie->def->mark_page_cached(cookie->netfs_data,
1140                                               op->mapping, page);
1141 }
1142 EXPORT_SYMBOL(fscache_mark_page_cached);
1143
1144 /**
1145  * fscache_mark_pages_cached - Mark pages as being cached
1146  * @op: The retrieval op pages are being marked for
1147  * @pagevec: The pages to be marked
1148  *
1149  * Mark a bunch of netfs pages as being cached.  After this is called,
1150  * the netfs must call fscache_uncache_page() to remove the mark.
1151  */
1152 void fscache_mark_pages_cached(struct fscache_retrieval *op,
1153                                struct pagevec *pagevec)
1154 {
1155         unsigned long loop;
1156
1157         for (loop = 0; loop < pagevec->nr; loop++)
1158                 fscache_mark_page_cached(op, pagevec->pages[loop]);
1159
1160         pagevec_reinit(pagevec);
1161 }
1162 EXPORT_SYMBOL(fscache_mark_pages_cached);
1163
1164 /*
1165  * Uncache all the pages in an inode that are marked PG_fscache, assuming them
1166  * to be associated with the given cookie.
1167  */
1168 void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
1169                                        struct inode *inode)
1170 {
1171         struct address_space *mapping = inode->i_mapping;
1172         struct pagevec pvec;
1173         pgoff_t next;
1174         int i;
1175
1176         _enter("%p,%p", cookie, inode);
1177
1178         if (!mapping || mapping->nrpages == 0) {
1179                 _leave(" [no pages]");
1180                 return;
1181         }
1182
1183         pagevec_init(&pvec, 0);
1184         next = 0;
1185         do {
1186                 if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
1187                         break;
1188                 for (i = 0; i < pagevec_count(&pvec); i++) {
1189                         struct page *page = pvec.pages[i];
1190                         next = page->index;
1191                         if (PageFsCache(page)) {
1192                                 __fscache_wait_on_page_write(cookie, page);
1193                                 __fscache_uncache_page(cookie, page);
1194                         }
1195                 }
1196                 pagevec_release(&pvec);
1197                 cond_resched();
1198         } while (++next);
1199
1200         _leave("");
1201 }
1202 EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);