GNU Linux-libre 4.4.284-gnu1
fs/ocfs2/buffer_head_io.c
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * io.c
 *
 * Buffer cache handling
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "inode.h"
#include "journal.h"
#include "uptodate.h"
#include "buffer_head_io.h"
#include "ocfs2_trace.h"

/*
 * Bits on bh->b_state used by ocfs2.
 *
 * These MUST be after the JBD2 bits.  Hence, we use BH_JBDPrivateStart.
 */
enum ocfs2_state_bits {
	BH_NeedsValidate = BH_JBDPrivateStart,
};

/* Expand the magic b_state functions.  BUFFER_FNS() below generates
 * buffer_needs_validate(), set_buffer_needs_validate() and
 * clear_buffer_needs_validate(), used in ocfs2_read_blocks(). */
BUFFER_FNS(NeedsValidate, needs_validate);

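/*
 * Write one block synchronously, bypassing the journal.  This is only
 * used for system files, which may legitimately be modified even while
 * the file system is soft read-only (e.g. during recovery), which is
 * why only a hard read-only mount is refused below.
 */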
int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
		      struct ocfs2_caching_info *ci)
{
	int ret = 0;

	trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);

	BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
	BUG_ON(buffer_jbd(bh));

	/* No need to check for a soft read-only file system here.
	 * Non-journalled writes are only ever done on system files,
	 * which can get modified during recovery even when the file
	 * system is mounted read-only. */
	if (ocfs2_is_hard_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	}

	ocfs2_metadata_cache_io_lock(ci);

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(WRITE, bh);

	wait_on_buffer(bh);

	if (buffer_uptodate(bh)) {
		ocfs2_set_buffer_uptodate(ci, bh);
	} else {
		/* We don't need to remove the clustered uptodate
		 * information for this bh as it's not marked locally
		 * uptodate. */
		ret = -EIO;
		mlog_errno(ret);
	}

	ocfs2_metadata_cache_io_unlock(ci);
out:
	return ret;
}

/* The caller must provide a bhs[] whose entries are either all NULL or
 * all non-NULL, so that read failures are easier to handle.
 */
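/*
 * A minimal usage sketch (hypothetical caller, for illustration only;
 * the real callers live elsewhere in ocfs2):
 *
 *	struct buffer_head *bhs[2] = { NULL, NULL };
 *	int status, i;
 *
 *	status = ocfs2_read_blocks_sync(osb, blkno, 2, bhs);
 *	if (!status)
 *		... use bhs[0]->b_data and bhs[1]->b_data ...
 *	for (i = 0; i < 2; i++)
 *		brelse(bhs[i]);		(brelse(NULL) is a no-op)
 *
 * On failure, buffer heads that this function allocated itself have
 * already been put and set back to NULL by the cleanup loop below.
 */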
int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
			   unsigned int nr, struct buffer_head *bhs[])
{
	int status = 0;
	unsigned int i;
	struct buffer_head *bh;
	int new_bh = 0;

	trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);

	if (!nr)
		goto bail;

	/* Don't put a buffer head and re-assign it to NULL if it was
	 * allocated by the caller, since the caller can't be aware of
	 * this alteration!
	 */
	new_bh = (bhs[0] == NULL);

	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(osb->sb, block++);
			if (bhs[i] == NULL) {
				status = -ENOMEM;
				mlog_errno(status);
				break;
			}
		}
		bh = bhs[i];

		if (buffer_jbd(bh)) {
			trace_ocfs2_read_blocks_sync_jbd(
					(unsigned long long)bh->b_blocknr);
			continue;
		}

		if (buffer_dirty(bh)) {
			/* This should probably be a BUG, or
			 * at least return an error. */
			mlog(ML_ERROR,
			     "trying to sync read a dirty "
			     "buffer! (blocknr = %llu), skipping\n",
			     (unsigned long long)bh->b_blocknr);
			continue;
		}

		lock_buffer(bh);
		if (buffer_jbd(bh)) {
			mlog(ML_ERROR,
			     "block %llu had the JBD bit set "
			     "while I was in lock_buffer!",
			     (unsigned long long)bh->b_blocknr);
			BUG();
		}

		get_bh(bh); /* for end_buffer_read_sync() */
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
	}

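	/*
	 * Wait for the reads we just issued.  If any buffer comes back
	 * not uptodate, status is set to -EIO and we jump back here so
	 * that the whole array is cleaned up: buffer heads allocated
	 * above are put and NULLed out, while caller-supplied ones only
	 * lose their uptodate bit.
	 */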
read_failure:
	for (i = nr; i > 0; i--) {
		bh = bhs[i - 1];

		if (unlikely(status)) {
			if (new_bh && bh) {
				/* If a bh in the middle failed, let the
				 * previous bhs finish their reads and
				 * then put them to avoid a bh leak.
				 */
				if (!buffer_jbd(bh))
					wait_on_buffer(bh);
				put_bh(bh);
				bhs[i - 1] = NULL;
			} else if (bh && buffer_uptodate(bh)) {
				clear_buffer_uptodate(bh);
			}
			continue;
		}

		/* No need to wait on the buffer if it's managed by JBD. */
		if (!buffer_jbd(bh))
			wait_on_buffer(bh);

		if (!buffer_uptodate(bh)) {
			/* Status won't be cleared from here on out,
			 * so we can safely record this and loop back
			 * to cleanup the other buffers. */
			status = -EIO;
			goto read_failure;
		}
	}

bail:
	return status;
}

/* The caller must provide a bhs[] whose entries are either all NULL or
 * all non-NULL, so that read failures are easier to handle.
 */
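/*
 * Unlike ocfs2_read_blocks_sync(), this consults the uptodate cache
 * attached to @ci, honours OCFS2_BH_IGNORE_CACHE and OCFS2_BH_READAHEAD
 * in @flags, and runs @validate on every block freshly read from disk.
 * A sketch of a validate callback (the real ones, e.g.
 * ocfs2_validate_inode_block(), live next to the structures they check):
 *
 *	static int my_validate(struct super_block *sb,
 *			       struct buffer_head *bh)
 *	{
 *		check the signature/checksum in bh->b_data;
 *		return 0 if it is good, a negative errno if not;
 *	}
 */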
int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
		      struct buffer_head *bhs[], int flags,
		      int (*validate)(struct super_block *sb,
				      struct buffer_head *bh))
{
	int status = 0;
	int i, ignore_cache = 0;
	struct buffer_head *bh;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	int new_bh = 0;

	trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);

	BUG_ON(!ci);
	BUG_ON((flags & OCFS2_BH_READAHEAD) &&
	       (flags & OCFS2_BH_IGNORE_CACHE));

	if (bhs == NULL) {
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr < 0) {
		mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr == 0) {
		status = 0;
		goto bail;
	}

	/* Don't put a buffer head and re-assign it to NULL if it was
	 * allocated by the caller, since the caller can't be aware of
	 * this alteration!
	 */
	new_bh = (bhs[0] == NULL);

	ocfs2_metadata_cache_io_lock(ci);
	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(sb, block++);
			if (bhs[i] == NULL) {
				ocfs2_metadata_cache_io_unlock(ci);
				status = -ENOMEM;
				mlog_errno(status);
				/* Don't forget to put previous bh! */
				break;
			}
		}
		bh = bhs[i];
		ignore_cache = (flags & OCFS2_BH_IGNORE_CACHE);

		/* There are three read-ahead cases here which we need to
		 * be concerned with. All three assume a buffer has
		 * previously been submitted with OCFS2_BH_READAHEAD
		 * and it hasn't yet completed I/O.
		 *
		 * 1) The current request is sync to disk. This rarely
		 *    happens these days, and never when performance
		 *    matters - the code can just wait on the buffer
		 *    lock and re-submit.
		 *
		 * 2) The current request is cached, but not
		 *    readahead. ocfs2_buffer_uptodate() will return
		 *    false anyway, so we'll wind up waiting on the
		 *    buffer lock to do I/O. We re-check the request
		 *    after getting the lock to avoid a re-submit.
		 *
		 * 3) The current request is readahead (and so must
		 *    also be a caching one). We short circuit if the
		 *    buffer is locked (under I/O) and if it's in the
		 *    uptodate cache. The re-check from #2 catches the
		 *    case that the previous read-ahead completes just
		 *    before our is-it-in-flight check.
		 */

		if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
			trace_ocfs2_read_blocks_from_disk(
			     (unsigned long long)bh->b_blocknr,
			     (unsigned long long)ocfs2_metadata_cache_owner(ci));
			/* We're using ignore_cache here to say
			 * "go to disk" */
			ignore_cache = 1;
		}

		trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr,
			ignore_cache, buffer_jbd(bh), buffer_dirty(bh));

		if (buffer_jbd(bh)) {
			continue;
		}

		if (ignore_cache) {
			if (buffer_dirty(bh)) {
				/* This should probably be a BUG, or
				 * at least return an error. */
				continue;
			}

			/* A read-ahead request was made - if the
			 * buffer is already under read-ahead from a
			 * previously submitted request then we are
			 * done here. */
			if ((flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_read_ahead(ci, bh))
				continue;

			lock_buffer(bh);
			if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
				mlog(ML_ERROR, "block %llu had the JBD bit set "
					       "while I was in lock_buffer!",
				     (unsigned long long)bh->b_blocknr);
				BUG();
#else
				unlock_buffer(bh);
				continue;
#endif
			}

			/* Re-check ocfs2_buffer_uptodate() as a
			 * previously read-ahead buffer may have
			 * completed I/O while we were waiting for the
			 * buffer lock. */
			if (!(flags & OCFS2_BH_IGNORE_CACHE)
			    && !(flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_uptodate(ci, bh)) {
				unlock_buffer(bh);
				continue;
			}

			get_bh(bh); /* for end_buffer_read_sync() */
			if (validate)
				set_buffer_needs_validate(bh);
			bh->b_end_io = end_buffer_read_sync;
			submit_bh(READ, bh);
			continue;
		}
	}

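	/*
	 * Completion pass.  For normal reads we wait for the I/O
	 * submitted above, run the caller's validate() callback on each
	 * freshly read buffer and, on any error, loop back here so that
	 * every bh gets cleaned up.  Read-ahead requests skip the wait
	 * entirely and simply record the buffers in the uptodate cache.
	 */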
read_failure:
	for (i = (nr - 1); i >= 0; i--) {
		bh = bhs[i];

		if (!(flags & OCFS2_BH_READAHEAD)) {
			if (unlikely(status)) {
				/* Clear the buffers on error, including
				 * those that had already succeeded in
				 * reading.
				 */
				if (new_bh && bh) {
					/* If a bh in the middle failed,
					 * let the previous bhs finish
					 * their reads and then put them
					 * to avoid a bh leak.
					 */
					if (!buffer_jbd(bh))
						wait_on_buffer(bh);
					put_bh(bh);
					bhs[i] = NULL;
				} else if (bh && buffer_uptodate(bh)) {
					clear_buffer_uptodate(bh);
				}
				continue;
			}
			/* We know this can't have changed as we hold the
			 * owner sem. Avoid doing any work on the bh if the
			 * journal has it. */
			if (!buffer_jbd(bh))
				wait_on_buffer(bh);

			if (!buffer_uptodate(bh)) {
				/* Status won't be cleared from here on out,
				 * so we can safely record this and loop back
				 * to cleanup the other buffers. Don't need to
				 * remove the clustered uptodate information
				 * for this bh as it's not marked locally
				 * uptodate. */
				status = -EIO;
				clear_buffer_needs_validate(bh);
				goto read_failure;
			}

			if (buffer_needs_validate(bh)) {
				/* We never set NeedsValidate if the
				 * buffer was held by the journal, so
				 * that better not have changed */
				BUG_ON(buffer_jbd(bh));
				clear_buffer_needs_validate(bh);
				status = validate(sb, bh);
				if (status)
					goto read_failure;
			}
		}

		/* Always set the buffer in the cache, even if it was
		 * a forced read, or read-ahead which hasn't yet
		 * completed. */
		ocfs2_set_buffer_uptodate(ci, bh);
	}
	ocfs2_metadata_cache_io_unlock(ci);

	trace_ocfs2_read_blocks_end((unsigned long long)block, nr,
				    flags, ignore_cache);

bail:

	return status;
}

/* Check whether the blkno is the super block or one of the backups. */
static void ocfs2_check_super_or_backup(struct super_block *sb,
					sector_t blkno)
{
	int i;
	u64 backup_blkno;

	if (blkno == OCFS2_SUPER_BLOCK_BLKNO)
		return;

	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		backup_blkno = ocfs2_backup_super_blkno(sb, i);
		if (backup_blkno == blkno)
			return;
	}

	BUG();
}

/*
 * Writing the super block and its backups doesn't need to collaborate
 * with the journal, so we don't need to lock ip_io_mutex and no ci
 * needs to be passed into this function.
 */
int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
				struct buffer_head *bh)
{
	int ret = 0;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;

	BUG_ON(buffer_jbd(bh));
	ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	}

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
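	/* This write bypasses the journal, so the journal's commit
	 * triggers won't compute the metaecc check for us; do it by
	 * hand before submitting the buffer. */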
	ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
	submit_bh(WRITE, bh);

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		ret = -EIO;
		mlog_errno(ret);
	}

out:
	return ret;
}