GNU Linux-libre 4.9.309-gnu1: fs/ocfs2/buffer_head_io.c
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * buffer_head_io.c
 *
 * Buffer cache handling
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "inode.h"
#include "journal.h"
#include "uptodate.h"
#include "buffer_head_io.h"
#include "ocfs2_trace.h"

/*
 * Bits on bh->b_state used by ocfs2.
 *
 * These MUST be after the JBD2 bits.  Hence, we use BH_JBDPrivateStart.
 */
enum ocfs2_state_bits {
        BH_NeedsValidate = BH_JBDPrivateStart,
};

/* Expand the magic b_state functions */
BUFFER_FNS(NeedsValidate, needs_validate);
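
/*
 * For reference: the BUFFER_FNS() line above generates the b_state helpers
 * used later in this file: set_buffer_needs_validate(),
 * clear_buffer_needs_validate() and buffer_needs_validate(). Anchoring
 * BH_NeedsValidate at BH_JBDPrivateStart keeps it clear of the bits jbd2
 * reserves on b_state. A simplified sketch of what the macro expands to
 * (see include/linux/buffer_head.h for the authoritative definition):
 *
 *      static inline void set_buffer_needs_validate(struct buffer_head *bh)
 *      {
 *              set_bit(BH_NeedsValidate, &bh->b_state);
 *      }
 *
 *      static inline void clear_buffer_needs_validate(struct buffer_head *bh)
 *      {
 *              clear_bit(BH_NeedsValidate, &bh->b_state);
 *      }
 *
 *      static inline int buffer_needs_validate(const struct buffer_head *bh)
 *      {
 *              return test_bit(BH_NeedsValidate, &bh->b_state);
 *      }
 */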

int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
                      struct ocfs2_caching_info *ci)
{
        int ret = 0;

        trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);

        BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
        BUG_ON(buffer_jbd(bh));

        /* No need to check for a soft readonly file system here.
         * Non-journalled writes are only ever done on system files,
         * which can get modified during recovery even if read-only. */
        if (ocfs2_is_hard_readonly(osb)) {
                ret = -EROFS;
                mlog_errno(ret);
                goto out;
        }

        ocfs2_metadata_cache_io_lock(ci);

        lock_buffer(bh);
        set_buffer_uptodate(bh);

        /* remove from dirty list before I/O. */
        clear_buffer_dirty(bh);

        get_bh(bh); /* for end_buffer_write_sync() */
        bh->b_end_io = end_buffer_write_sync;
        submit_bh(REQ_OP_WRITE, 0, bh);

        wait_on_buffer(bh);

        if (buffer_uptodate(bh)) {
                ocfs2_set_buffer_uptodate(ci, bh);
        } else {
                /* We don't need to remove the clustered uptodate
                 * information for this bh as it's not marked locally
                 * uptodate. */
                ret = -EIO;
                mlog_errno(ret);
        }

        ocfs2_metadata_cache_io_unlock(ci);
out:
        return ret;
}
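
/*
 * A minimal caller sketch for ocfs2_write_block(); the helper below is
 * hypothetical and only illustrates the calling convention. The buffer must
 * already hold the bytes to write (bh->b_data filled in by the caller),
 * must not be owned by jbd2, and INODE_CACHE() supplies the metadata cache
 * of the system file that owns the block:
 *
 *      static int example_sync_write(struct ocfs2_super *osb,
 *                                    struct inode *sys_inode,
 *                                    struct buffer_head *bh)
 *      {
 *              return ocfs2_write_block(osb, bh, INODE_CACHE(sys_inode));
 *      }
 */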

/* The caller must provide a bhs[] whose entries are either all NULL or
 * all non-NULL, so it will be easier to handle read failure.
 */
int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
                           unsigned int nr, struct buffer_head *bhs[])
{
        int status = 0;
        unsigned int i;
        struct buffer_head *bh;
        int new_bh = 0;

        trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);

        if (!nr)
                goto bail;

        /* Don't put a buffer head and re-assign it to NULL if it was
         * allocated outside, since the caller can't be aware of this
         * alteration!
         */
        new_bh = (bhs[0] == NULL);

        for (i = 0 ; i < nr ; i++) {
                if (bhs[i] == NULL) {
                        bhs[i] = sb_getblk(osb->sb, block++);
                        if (bhs[i] == NULL) {
                                status = -ENOMEM;
                                mlog_errno(status);
                                break;
                        }
                }
                bh = bhs[i];

                if (buffer_jbd(bh)) {
                        trace_ocfs2_read_blocks_sync_jbd(
                                        (unsigned long long)bh->b_blocknr);
                        continue;
                }

                if (buffer_dirty(bh)) {
                        /* This should probably be a BUG, or
                         * at least return an error. */
                        mlog(ML_ERROR,
                             "trying to sync read a dirty "
                             "buffer! (blocknr = %llu), skipping\n",
                             (unsigned long long)bh->b_blocknr);
                        continue;
                }

                lock_buffer(bh);
                if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
                        mlog(ML_ERROR,
                             "block %llu had the JBD bit set "
                             "while I was in lock_buffer!",
                             (unsigned long long)bh->b_blocknr);
                        BUG();
#else
                        unlock_buffer(bh);
                        continue;
#endif
                }

                get_bh(bh); /* for end_buffer_read_sync() */
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(REQ_OP_READ, 0, bh);
        }

read_failure:
        for (i = nr; i > 0; i--) {
                bh = bhs[i - 1];

                if (unlikely(status)) {
                        if (new_bh && bh) {
                                /* If a middle bh fails, let the previous
                                 * bh finish its read and then put it to
                                 * avoid a bh leak
                                 */
                                if (!buffer_jbd(bh))
                                        wait_on_buffer(bh);
                                put_bh(bh);
                                bhs[i - 1] = NULL;
                        } else if (bh && buffer_uptodate(bh)) {
                                clear_buffer_uptodate(bh);
                        }
                        continue;
                }

                /* No need to wait on the buffer if it's managed by JBD. */
                if (!buffer_jbd(bh))
                        wait_on_buffer(bh);

                if (!buffer_uptodate(bh)) {
                        /* Status won't be cleared from here on out,
                         * so we can safely record this and loop back
                         * to cleanup the other buffers. */
                        status = -EIO;
                        goto read_failure;
                }
        }

bail:
        return status;
}
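
/*
 * A minimal caller sketch for ocfs2_read_blocks_sync(); the helper below is
 * hypothetical and only shows the all-NULL bhs[] convention described
 * above. On success the array is filled in via sb_getblk(), so the caller
 * is responsible for brelse() on every entry once it is done with the data;
 * when the caller passed NULL entries (as here), the function puts the
 * buffers itself on failure and re-NULLs the slots:
 *
 *      static int example_read_two_blocks(struct ocfs2_super *osb, u64 blkno)
 *      {
 *              struct buffer_head *bhs[2] = { NULL, NULL };
 *              int status, i;
 *
 *              status = ocfs2_read_blocks_sync(osb, blkno, 2, bhs);
 *              if (status)
 *                      return status;
 *
 *              for (i = 0; i < 2; i++)
 *                      brelse(bhs[i]);
 *              return 0;
 *      }
 */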

/* The caller must provide a bhs[] whose entries are either all NULL or
 * all non-NULL, so it will be easier to handle read failure.
 */
int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
                      struct buffer_head *bhs[], int flags,
                      int (*validate)(struct super_block *sb,
                                      struct buffer_head *bh))
{
        int status = 0;
        int i, ignore_cache = 0;
        struct buffer_head *bh;
        struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
        int new_bh = 0;

        trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);

        BUG_ON(!ci);
        BUG_ON((flags & OCFS2_BH_READAHEAD) &&
               (flags & OCFS2_BH_IGNORE_CACHE));

        if (bhs == NULL) {
                status = -EINVAL;
                mlog_errno(status);
                goto bail;
        }

        if (nr < 0) {
                mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
                status = -EINVAL;
                mlog_errno(status);
                goto bail;
        }

        if (nr == 0) {
                status = 0;
                goto bail;
        }

        /* Don't put a buffer head and re-assign it to NULL if it was
         * allocated outside, since the caller can't be aware of this
         * alteration!
         */
        new_bh = (bhs[0] == NULL);

        ocfs2_metadata_cache_io_lock(ci);
        for (i = 0 ; i < nr ; i++) {
                if (bhs[i] == NULL) {
                        bhs[i] = sb_getblk(sb, block++);
                        if (bhs[i] == NULL) {
                                status = -ENOMEM;
                                mlog_errno(status);
                                /* Don't forget to put previous bh! The io
                                 * lock is dropped once, after the cleanup
                                 * loop below. */
                                break;
                        }
                }
                bh = bhs[i];
                ignore_cache = (flags & OCFS2_BH_IGNORE_CACHE);

                /* There are three read-ahead cases here which we need to
                 * be concerned with. All three assume a buffer has
                 * previously been submitted with OCFS2_BH_READAHEAD
                 * and it hasn't yet completed I/O.
                 *
                 * 1) The current request is sync to disk. This rarely
                 *    happens these days, and never when performance
                 *    matters - the code can just wait on the buffer
                 *    lock and re-submit.
                 *
                 * 2) The current request is cached, but not
                 *    readahead. ocfs2_buffer_uptodate() will return
                 *    false anyway, so we'll wind up waiting on the
                 *    buffer lock to do I/O. We re-check the request
                 *    after getting the lock to avoid a re-submit.
                 *
                 * 3) The current request is readahead (and so must
                 *    also be a caching one). We short circuit if the
                 *    buffer is locked (under I/O) and if it's in the
                 *    uptodate cache. The re-check from #2 catches the
                 *    case that the previous read-ahead completes just
                 *    before our is-it-in-flight check.
                 */

                if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
                        trace_ocfs2_read_blocks_from_disk(
                             (unsigned long long)bh->b_blocknr,
                             (unsigned long long)ocfs2_metadata_cache_owner(ci));
                        /* We're using ignore_cache here to say
                         * "go to disk" */
                        ignore_cache = 1;
                }

                trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr,
                        ignore_cache, buffer_jbd(bh), buffer_dirty(bh));

                if (buffer_jbd(bh)) {
                        continue;
                }

                if (ignore_cache) {
                        if (buffer_dirty(bh)) {
                                /* This should probably be a BUG, or
                                 * at least return an error. */
                                continue;
                        }

                        /* A read-ahead request was made - if the
                         * buffer is already under read-ahead from a
                         * previously submitted request then we are
                         * done here. */
                        if ((flags & OCFS2_BH_READAHEAD)
                            && ocfs2_buffer_read_ahead(ci, bh))
                                continue;

                        lock_buffer(bh);
                        if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
                                mlog(ML_ERROR, "block %llu had the JBD bit set "
                                               "while I was in lock_buffer!",
                                     (unsigned long long)bh->b_blocknr);
                                BUG();
#else
                                unlock_buffer(bh);
                                continue;
#endif
                        }

                        /* Re-check ocfs2_buffer_uptodate() as a
                         * previously read-ahead buffer may have
                         * completed I/O while we were waiting for the
                         * buffer lock. */
                        if (!(flags & OCFS2_BH_IGNORE_CACHE)
                            && !(flags & OCFS2_BH_READAHEAD)
                            && ocfs2_buffer_uptodate(ci, bh)) {
                                unlock_buffer(bh);
                                continue;
                        }

                        get_bh(bh); /* for end_buffer_read_sync() */
                        if (validate)
                                set_buffer_needs_validate(bh);
                        bh->b_end_io = end_buffer_read_sync;
                        submit_bh(REQ_OP_READ, 0, bh);
                        continue;
                }
        }

read_failure:
        for (i = (nr - 1); i >= 0; i--) {
                bh = bhs[i];

                if (!(flags & OCFS2_BH_READAHEAD)) {
                        if (unlikely(status)) {
                                /* Clear the buffers on error, including
                                 * those that ever succeeded in reading
                                 */
                                if (new_bh && bh) {
                                        /* If a middle bh fails, let the
                                         * previous bh finish its read and
                                         * then put it to avoid a bh leak
                                         */
                                        if (!buffer_jbd(bh))
                                                wait_on_buffer(bh);
                                        put_bh(bh);
                                        bhs[i] = NULL;
                                } else if (bh && buffer_uptodate(bh)) {
                                        clear_buffer_uptodate(bh);
                                }
                                continue;
                        }
                        /* We know this can't have changed as we hold the
                         * owner sem. Avoid doing any work on the bh if the
                         * journal has it. */
                        if (!buffer_jbd(bh))
                                wait_on_buffer(bh);

                        if (!buffer_uptodate(bh)) {
                                /* Status won't be cleared from here on out,
                                 * so we can safely record this and loop back
                                 * to cleanup the other buffers. Don't need to
                                 * remove the clustered uptodate information
                                 * for this bh as it's not marked locally
                                 * uptodate. */
                                status = -EIO;
                                clear_buffer_needs_validate(bh);
                                goto read_failure;
                        }

                        if (buffer_needs_validate(bh)) {
                                /* We never set NeedsValidate if the
                                 * buffer was held by the journal, so
                                 * that better not have changed */
                                BUG_ON(buffer_jbd(bh));
                                clear_buffer_needs_validate(bh);
                                status = validate(sb, bh);
                                if (status)
                                        goto read_failure;
                        }
                }

                /* Always set the buffer in the cache, even if it was
                 * a forced read, or read-ahead which hasn't yet
                 * completed. */
                ocfs2_set_buffer_uptodate(ci, bh);
        }
        ocfs2_metadata_cache_io_unlock(ci);

        trace_ocfs2_read_blocks_end((unsigned long long)block, nr,
                                    flags, ignore_cache);

bail:

        return status;
}
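
/*
 * A minimal caller sketch for ocfs2_read_blocks(); both helpers below are
 * hypothetical and only illustrate the validate callback contract. When a
 * validate function is supplied, a buffer freshly read from disk is tagged
 * with BH_NeedsValidate at submit time and the callback runs once in the
 * completion loop above, after the read finishes and before the buffer is
 * added to the uptodate cache. The real ocfs2 readers (inode, extent block,
 * and so on) follow this pattern with their own validators. The callback
 * returns 0 on success or a negative errno such as -EIO on a corrupt block:
 *
 *      static int example_validate(struct super_block *sb,
 *                                  struct buffer_head *bh)
 *      {
 *              return 0;
 *      }
 *
 *      static int example_read_one(struct ocfs2_caching_info *ci, u64 blkno,
 *                                  struct buffer_head **bh)
 *      {
 *              return ocfs2_read_blocks(ci, blkno, 1, bh, 0,
 *                                       example_validate);
 *      }
 *
 * An OCFS2_BH_READAHEAD pass may be issued first (typically with a NULL
 * validate pointer); a later blocking call like the one above then waits
 * for that I/O and performs the validation.
 */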

/* Check whether the blkno is the super block or one of the backups. */
static void ocfs2_check_super_or_backup(struct super_block *sb,
                                        sector_t blkno)
{
        int i;
        u64 backup_blkno;

        if (blkno == OCFS2_SUPER_BLOCK_BLKNO)
                return;

        for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
                backup_blkno = ocfs2_backup_super_blkno(sb, i);
                if (backup_blkno == blkno)
                        return;
        }

        BUG();
}
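
/*
 * For context (a summary, not a definition; ocfs2_fs.h is authoritative):
 * ocfs2_backup_super_blkno() maps a backup index to a fixed byte offset on
 * the volume, starting at 1 GB and growing by a factor of four for each of
 * the OCFS2_MAX_BACKUP_SUPERBLOCKS slots, then converts that offset into a
 * block number using the filesystem block size. For example, with 4 KB
 * blocks the first backup would sit at byte offset 1 GB, i.e. block 262144.
 */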

/*
 * Writing the super block and its backups doesn't need to collaborate with
 * the journal, so we don't need to lock ip_io_mutex and ci doesn't need to
 * be passed into this function.
 */
int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
                                struct buffer_head *bh)
{
        int ret = 0;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;

        BUG_ON(buffer_jbd(bh));
        ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);

        if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
                ret = -EROFS;
                mlog_errno(ret);
                goto out;
        }

        lock_buffer(bh);
        set_buffer_uptodate(bh);

        /* remove from dirty list before I/O. */
        clear_buffer_dirty(bh);

        get_bh(bh); /* for end_buffer_write_sync() */
        bh->b_end_io = end_buffer_write_sync;
        ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
        submit_bh(REQ_OP_WRITE, 0, bh);

        wait_on_buffer(bh);

        if (!buffer_uptodate(bh)) {
                ret = -EIO;
                mlog_errno(ret);
        }

out:
        return ret;
}
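
/*
 * A minimal caller sketch for ocfs2_write_super_or_backup(); the helper
 * below is hypothetical. The on-disk superblock is embedded in an
 * ocfs2_dinode, which is why the checksum above is recomputed over
 * bh->b_data via di->i_check right before submission. A caller updates the
 * in-memory copy first (not shown) and then hands the buffer over:
 *
 *      static int example_flush_super(struct ocfs2_super *osb,
 *                                     struct buffer_head *super_bh)
 *      {
 *              return ocfs2_write_super_or_backup(osb, super_bh);
 *      }
 */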