/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/drbd.h>
#include <linux/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
#include "drbd_vli.h"
#include "drbd_debugfs.h"

static DEFINE_MUTEX(drbd_main_mutex);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static void drbd_release(struct gendisk *gd, fmode_t mode);
static void md_sync_timer_fn(struct timer_list *t);
static int w_bitmap_io(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* thanks to these macros, if compiled into the kernel (not-module),
 * these become boot parameters (e.g., drbd.minor_count) */
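/* Example (illustrative, not from this file): loaded as a module, the
 * knobs below are set like "modprobe drbd minor_count=32"; built into
 * the kernel, the same knob becomes the boot parameter
 * "drbd.minor_count=32". */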

#ifdef CONFIG_DRBD_FAULT_INJECTION
int drbd_enable_faults;
int drbd_fault_rate;
static int drbd_fault_count;
static int drbd_fault_devs;
/* bitmap of enabled faults */
module_param_named(enable_faults, drbd_enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param_named(fault_rate, drbd_fault_rate, int, 0664);
/* count of faults inserted */
module_param_named(fault_count, drbd_fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param_named(fault_devs, drbd_fault_devs, int, 0644);
#endif
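/* Illustrative sketch of how these knobs combine (assuming, as in
 * drbd_insert_fault(), that bit i of enable_faults selects fault type i
 * of the DRBD_FAULT_* enum in drbd_int.h):
 *
 *	modprobe drbd enable_faults=3 fault_rate=10
 *
 * would inject the two lowest-numbered fault types into roughly 10% of
 * the eligible operations. */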

/* module parameters we can keep static */
static bool drbd_allow_oos; /* allow_open_on_secondary */
static bool drbd_disable_sendpage;
MODULE_PARM_DESC(allow_oos, "DONT USE!");
module_param_named(allow_oos, drbd_allow_oos, bool, 0);
module_param_named(disable_sendpage, drbd_disable_sendpage, bool, 0644);

/* module parameters we share */
int drbd_proc_details; /* Detail level in proc drbd */
module_param_named(proc_details, drbd_proc_details, int, 0644);
/* module parameters shared with defaults */
unsigned int drbd_minor_count = DRBD_MINOR_COUNT_DEF;
/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char drbd_usermode_helper[80] = "/sbin/drbdadm";
module_param_named(minor_count, drbd_minor_count, uint, 0444);
module_param_string(usermode_helper, drbd_usermode_helper, sizeof(drbd_usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr drbd_devices;
struct list_head drbd_resources;
struct mutex resources_mutex;

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t drbd_request_mempool;
mempool_t drbd_ee_mempool;
mempool_t drbd_md_io_page_pool;
struct bio_set drbd_md_io_bio_set;
struct bio_set drbd_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a singly linked list; the next pointer is the private
         member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t   drbd_pp_lock;
int          drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;
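
/* Minimal sketch of how this pool is meant to be used (the real
 * alloc/free helpers live in drbd_receiver.c; pp_pool_pop() here is a
 * hypothetical name).  It assumes drbd_pp_lock is held and that the
 * private member of struct page chains the list, as noted above:
 *
 *	static struct page *pp_pool_pop(void)
 *	{
 *		struct page *page = drbd_pp_pool;
 *
 *		if (page) {
 *			drbd_pp_pool = (struct page *)page_private(page);
 *			set_page_private(page, 0);
 *			drbd_pp_vacant--;
 *		}
 *		return page;
 *	}
 */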

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	if (!bioset_initialized(&drbd_md_io_bio_set))
		return bio_alloc(gfp_mask, 1);

	return bio_alloc_bioset(gfp_mask, 1, &drbd_md_io_bio_set);
}

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When it is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&device->local_cnt))
			wake_up(&device->misc_wait);
	}
	return io_allowed;
}

#endif

/**
 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
 * @connection:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * epoch of not yet barrier-acked requests, this function will cause a
 * termination of the connection.
 */
void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_request *r;
	struct drbd_request *req = NULL, *tmp = NULL;
	int expect_epoch = 0;
	int expect_size = 0;

	spin_lock_irq(&connection->resource->req_lock);

	/* find oldest not yet barrier-acked write request,
	 * count writes in its epoch. */
	list_for_each_entry(r, &connection->transfer_log, tl_requests) {
		const unsigned s = r->rq_state;
		if (!req) {
			if (!(s & RQ_WRITE))
				continue;
			if (!(s & RQ_NET_MASK))
				continue;
			if (s & RQ_NET_DONE)
				continue;
			req = r;
			expect_epoch = req->epoch;
			expect_size++;
		} else {
			if (r->epoch != expect_epoch)
				break;
			if (!(s & RQ_WRITE))
				continue;
			/* if (s & RQ_DONE): not expected */
			/* if (!(s & RQ_NET_MASK)): not expected */
			expect_size++;
		}
	}

	/* first some paranoia code */
	if (req == NULL) {
		drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (expect_epoch != barrier_nr) {
		drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, expect_epoch);
		goto bail;
	}

	if (expect_size != set_size) {
		drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, expect_size);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch. */
	/* this extra list walk restart is paranoia,
	 * to catch requests being barrier-acked "unexpectedly".
	 * It usually should find the same req again, or some READ preceding it. */
	list_for_each_entry(req, &connection->transfer_log, tl_requests)
		if (req->epoch == expect_epoch) {
			tmp = req;
			break;
		}
	req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
	list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
		if (req->epoch != expect_epoch)
			break;
		_req_mod(req, BARRIER_ACKED);
	}
	spin_unlock_irq(&connection->resource->req_lock);

	return;

bail:
	spin_unlock_irq(&connection->resource->req_lock);
	conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
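
/* Example of the checks above (sketch): if the oldest not yet
 * barrier-acked epoch is #7 and contains three writes, the peer's
 * P_BARRIER_ACK must carry barrier_nr == 7 and set_size == 3; any
 * mismatch takes the bail path and drops the connection with
 * C_PROTOCOL_ERROR. */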


/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @connection:	DRBD connection to operate on.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
/* must hold resource->req_lock */
void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
	struct drbd_request *req, *r;

	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
		_req_mod(req, what);
}

void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
	spin_lock_irq(&connection->resource->req_lock);
	_tl_restart(connection, what);
	spin_unlock_irq(&connection->resource->req_lock);
}

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @connection:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_connection *connection)
{
	tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
}

/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
 * @device:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct drbd_request *req, *r;

	spin_lock_irq(&connection->resource->req_lock);
	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
		if (!(req->rq_state & RQ_LOCAL_PENDING))
			continue;
		if (req->device != device)
			continue;
		_req_mod(req, ABORT_DISK_IO);
	}
	spin_unlock_irq(&connection->resource->req_lock);
}

static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_resource *resource = thi->resource;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0],
		 resource->name);

	allow_kernel_signal(DRBD_SIGKILL);
	allow_kernel_signal(SIGXCPU);
restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		drbd_info(resource, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete_all(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	drbd_info(resource, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */

	if (thi->connection)
		kref_put(&thi->connection->kref, drbd_destroy_connection);
	kref_put(&resource->kref, drbd_destroy_resource);
	module_put(THIS_MODULE);
	return retval;
}

static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), const char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task    = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->resource = resource;
	thi->connection = NULL;
	thi->name = name;
}

int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_resource *resource = thi->resource;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		drbd_info(resource, "Starting %s thread (from %s [%d])\n",
			 thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			drbd_err(resource, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		kref_get(&resource->kref);
		if (thi->connection)
			kref_get(&thi->connection->kref);

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->resource->name);

		if (IS_ERR(nt)) {
			drbd_err(resource, "Couldn't start thread\n");

			if (thi->connection)
				kref_put(&thi->connection->kref, drbd_destroy_connection);
			kref_put(&resource->kref, drbd_destroy_resource);
			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
				thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}


void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}
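
/* Typical lifecycle of the helpers above (illustrative sketch; the real
 * call sites live elsewhere in the driver, e.g. conn_create()):
 *
 *	drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
 *	drbd_thread_start(&connection->worker);
 *	...
 *	_drbd_thread_stop(&connection->worker, false, true);
 *
 * where the final call asks the thread to exit and waits for it. */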

int conn_lowest_minor(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr = 0, minor = -1;

	rcu_read_lock();
	peer_device = idr_get_next(&connection->peer_devices, &vnr);
	if (peer_device)
		minor = device_to_minor(peer_device->device);
	rcu_read_unlock();

	return minor;
}

#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @cpu_mask:	cpumask to set up; receives the currently least-loaded online CPU
 *
 * Forces all threads of a resource onto the same CPU. This is beneficial for
 * DRBD's performance. May be overridden by the user's configuration.
 */
static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
{
	unsigned int *resources_per_cpu, min_index = ~0;

	resources_per_cpu = kcalloc(nr_cpu_ids, sizeof(*resources_per_cpu),
				    GFP_KERNEL);
	if (resources_per_cpu) {
		struct drbd_resource *resource;
		unsigned int cpu, min = ~0;

		rcu_read_lock();
		for_each_resource_rcu(resource, &drbd_resources) {
			for_each_cpu(cpu, resource->cpu_mask)
				resources_per_cpu[cpu]++;
		}
		rcu_read_unlock();
		for_each_online_cpu(cpu) {
			if (resources_per_cpu[cpu] < min) {
				min = resources_per_cpu[cpu];
				min_index = cpu;
			}
		}
		kfree(resources_per_cpu);
	}
	if (min_index == ~0) {
		cpumask_setall(*cpu_mask);
		return;
	}
	cpumask_set_cpu(min_index, *cpu_mask);
}

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * Call from the "main loop" of _all_ threads; no mutex is needed, current won't
 * die prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct drbd_resource *resource = thi->resource;
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, resource->cpu_mask);
}
#else
#define drbd_calc_cpu_mask(A) ({})
#endif

/**
 * drbd_header_size  -  size of a packet header
 * @connection:	DRBD connection.
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures.  (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_connection *connection)
{
	if (connection->agreed_pro_version >= 100) {
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
		return sizeof(struct p_header100);
	} else {
		BUILD_BUG_ON(sizeof(struct p_header80) !=
			     sizeof(struct p_header95));
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
		return sizeof(struct p_header80);
	}
}

static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
	return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
				      int size, int vnr)
{
	h->magic = cpu_to_be32(DRBD_MAGIC_100);
	h->volume = cpu_to_be16(vnr);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	h->pad = 0;
	return sizeof(struct p_header100);
}

static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
{
	if (connection->agreed_pro_version >= 100)
		return prepare_header100(buffer, cmd, size, vnr);
	else if (connection->agreed_pro_version >= 95 &&
		 size > DRBD_MAX_SIZE_H80_PACKET)
		return prepare_header95(buffer, cmd, size);
	else
		return prepare_header80(buffer, cmd, size);
}
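
/* Example of the dispatch above (sketch): with agreed_pro_version 96, a
 * payload larger than DRBD_MAX_SIZE_H80_PACKET goes out with the
 * p_header95 format (32-bit length field), while small control packets
 * still use the 8-byte p_header80. */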

static void *__conn_prepare_command(struct drbd_connection *connection,
				    struct drbd_socket *sock)
{
	if (!sock->socket)
		return NULL;
	return sock->sbuf + drbd_header_size(connection);
}

void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
{
	void *p;

	mutex_lock(&sock->mutex);
	p = __conn_prepare_command(connection, sock);
	if (!p)
		mutex_unlock(&sock->mutex);

	return p;
}

void *drbd_prepare_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock)
{
	return conn_prepare_command(peer_device->connection, sock);
}

static int __send_command(struct drbd_connection *connection, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/*
	 * Called with @data == NULL and the size of the data blocks in @size
	 * for commands that send data blocks.  For those commands, omit the
	 * MSG_MORE flag: this will increase the likelihood that data blocks
	 * which are page aligned on the sender will end up page aligned on the
	 * receiver.
	 */
	msg_flags = data ? MSG_MORE : 0;

	header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
				      header_size + size);
	err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (data && !err)
		err = drbd_send_all(connection, sock->socket, data, size, 0);
	/* DRBD protocol "pings" are latency critical.
	 * This is supposed to trigger tcp_push_pending_frames() */
	if (!err && (cmd == P_PING || cmd == P_PING_ACK))
		drbd_tcp_nodelay(sock->socket);

	return err;
}

static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
{
	return __send_command(connection, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __conn_send_command(connection, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(peer_device->connection, peer_device->device->vnr,
			     sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}
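
/* Note the pairing in the helpers above: *_prepare_command() takes
 * sock->mutex and returns a pointer just past the packet header in the
 * send buffer, and conn_send_command()/drbd_send_command() transmit and
 * drop the mutex again.  drbd_send_ping() below is the minimal example
 * of this pattern. */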

int drbd_send_ping(struct drbd_connection *connection)
{
	struct drbd_socket *sock;

	sock = &connection->meta;
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_connection *connection)
{
	struct drbd_socket *sock;

	sock = &connection->meta;
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
}

int drbd_send_sync_param(struct drbd_peer_device *peer_device)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = peer_device->connection->agreed_pro_version;
	enum drbd_packet cmd;
	struct net_conf *nc;
	struct disk_conf *dc;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(nc->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(peer_device->device)) {
		dc = rcu_dereference(peer_device->device->ldev->disk_conf);
		p->resync_rate = cpu_to_be32(dc->resync_rate);
		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
		put_ldev(peer_device->device);
	} else {
		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	if (apv >= 88)
		strcpy(p->verify_alg, nc->verify_alg);
	if (apv >= 89)
		strcpy(p->csums_alg, nc->csums_alg);
	rcu_read_unlock();

	return drbd_send_command(peer_device, sock, cmd, size, NULL, 0);
}

int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	struct net_conf *nc;
	int size, cf;

	sock = &connection->data;
	p = __conn_prepare_command(connection, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);

	if (nc->tentative && connection->agreed_pro_version < 92) {
		rcu_read_unlock();
		drbd_err(connection, "--dry-run is not supported by peer");
		return -EOPNOTSUPP;
	}

	size = sizeof(*p);
	if (connection->agreed_pro_version >= 87)
		size += strlen(nc->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(nc->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
	p->two_primaries = cpu_to_be32(nc->two_primaries);
	cf = 0;
	if (nc->discard_my_data)
		cf |= CF_DISCARD_MY_DATA;
	if (nc->tentative)
		cf |= CF_DRY_RUN;
	p->conn_flags    = cpu_to_be32(cf);

	if (connection->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, nc->integrity_alg);
	rcu_read_unlock();

	return __conn_send_command(connection, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_connection *connection)
{
	int err;

	mutex_lock(&connection->data.mutex);
	err = __drbd_send_protocol(connection, P_PROTOCOL);
	mutex_unlock(&connection->data.mutex);

	return err;
}

static int _drbd_send_uuids(struct drbd_peer_device *peer_device, u64 uuid_flags)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(device, D_NEGOTIATING))
		return 0;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p) {
		put_ldev(device);
		return -EIO;
	}
	spin_lock_irq(&device->ldev->md.uuid_lock);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
	spin_unlock_irq(&device->ldev->md.uuid_lock);

	device->comm_bm_set = drbd_bm_total_weight(device);
	p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
	rcu_read_lock();
	uuid_flags |= rcu_dereference(peer_device->connection->net_conf)->discard_my_data ? 1 : 0;
	rcu_read_unlock();
	uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
	uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(device);
	return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_peer_device *peer_device)
{
	return _drbd_send_uuids(peer_device, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *peer_device)
{
	return _drbd_send_uuids(peer_device, 8);
}
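
/* The uuid_flags bits assembled above (as read from this code, not a
 * formal spec): 1 = discard_my_data is set on this node, 2 = we were a
 * crashed primary, 4 = our disk is becoming D_INCONSISTENT, and 8
 * (passed in by drbd_send_uuids_skip_initial_sync()) = skip the initial
 * sync. */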

void drbd_print_uuids(struct drbd_device *device, const char *text)
{
	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		u64 *uuid = device->ldev->md.uuid;
		drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
		     text,
		     (unsigned long long)uuid[UI_CURRENT],
		     (unsigned long long)uuid[UI_BITMAP],
		     (unsigned long long)uuid[UI_HISTORY_START],
		     (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(device);
	} else {
		drbd_info(device, "%s effective data uuid: %016llX\n",
				text,
				(unsigned long long)device->ed_uuid);
	}
}

void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(device, device->state.disk == D_UP_TO_DATE);

	uuid = device->ldev->md.uuid[UI_BITMAP];
	if (uuid && uuid != UUID_JUST_CREATED)
		uuid = uuid + UUID_NEW_BM_OFFSET;
	else
		get_random_bytes(&uuid, sizeof(u64));
	drbd_uuid_set(device, UI_BITMAP, uuid);
	drbd_print_uuids(device, "updated sync UUID");
	drbd_md_sync(device);

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (p) {
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
	}
}

/* communicated if (agreed_features & DRBD_FF_WSAME) */
static void
assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p,
					struct request_queue *q)
{
	if (q) {
		p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
		p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
		p->qlim->alignment_offset = cpu_to_be32(queue_alignment_offset(q));
		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
		p->qlim->discard_enabled = blk_queue_discard(q);
		p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
	} else {
		q = device->rq_queue;
		p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
		p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
		p->qlim->alignment_offset = 0;
		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
		p->qlim->discard_enabled = 0;
		p->qlim->write_same_capable = 0;
	}
}

int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_sizes *p;
	sector_t d_size, u_size;
	int q_order_type;
	unsigned int max_bio_size;
	unsigned int packet_size;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;

	packet_size = sizeof(*p);
	if (peer_device->connection->agreed_features & DRBD_FF_WSAME)
		packet_size += sizeof(p->qlim[0]);

	memset(p, 0, packet_size);
	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(device->ldev);
		rcu_read_lock();
		u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
		rcu_read_unlock();
		q_order_type = drbd_queue_order_type(device);
		max_bio_size = queue_max_hw_sectors(q) << 9;
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
		assign_p_sizes_qlim(device, p, q);
		put_ldev(device);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
		assign_p_sizes_qlim(device, p, NULL);
	}

	if (peer_device->connection->agreed_pro_version <= 94)
		max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
	else if (peer_device->connection->agreed_pro_version < 100)
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);

	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);

	return drbd_send_command(peer_device, sock, P_SIZES, packet_size, NULL, 0);
}

/**
 * drbd_send_current_state() - Sends the drbd state to the peer
 * @peer_device:	DRBD peer device.
 */
int drbd_send_current_state(struct drbd_peer_device *peer_device)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_state() - After a state change, sends the new state to the peer
 * @peer_device:	DRBD peer device.
 * @state:	the state to send, not necessarily the current state.
 *
 * Each state change queues an "after_state_ch" work, which will eventually
 * send the resulting new state to the peer. If more state changes happen
 * between queuing and processing of the after_state_ch work, we still
 * want to send each intermediary state in the order it occurred.
 */
int drbd_send_state(struct drbd_peer_device *peer_device, union drbd_state state)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(state.i); /* Within the send mutex */
	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}

int drbd_send_state_req(struct drbd_peer_device *peer_device, union drbd_state mask, union drbd_state val)
{
	struct drbd_socket *sock;
	struct p_req_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}

int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
	enum drbd_packet cmd;
	struct drbd_socket *sock;
	struct p_req_state *p;

	cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
	sock = &connection->data;
	p = conn_prepare_command(connection, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
}

void drbd_send_sr_reply(struct drbd_peer_device *peer_device, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;

	sock = &peer_device->connection->meta;
	p = drbd_prepare_command(peer_device, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
	}
}

void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;
	enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

	sock = &connection->meta;
	p = conn_prepare_command(connection, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
	}
}

static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
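
/* Layout of the encoding byte manipulated by the three helpers above,
 * as implied by their masks: bit 7 = value of the first run (set or
 * unset), bits 6..4 = number of pad bits in the last byte, bits 3..0 =
 * encoding code (e.g. RLE_VLI_Bits). */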

static int fill_bitmap_rle_bits(struct drbd_device *device,
			 struct p_compressed_bm *p,
			 unsigned int size,
			 struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits, use_rle;

	/* may we use this feature? */
	rcu_read_lock();
	use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
	rcu_read_unlock();
	if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most thus many bytes */
	bitstream_init(&bs, p->code, size, 0);
	memset(p->code, 0, size);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how many plain bits we can stuff into one packet
	 * using RLE and VLI. */
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
				    : _drbd_bm_find_next(device, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				dcbp_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			dcbp_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			drbd_err(device, "unexpected zero runlength while encoding bitmap "
			    "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			drbd_err(device, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}
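
/* Worked example (sketch): for a bitmap region starting 0011 1000...,
 * the encoder records start value 0 (the first run consists of cleared
 * bits) and then emits the VLI-encoded run lengths 2, 3, ....  If the
 * resulting code string is not smaller than the plain bits it covers,
 * the function returns 0 and the caller falls back to a plain P_BITMAP
 * packet. */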

/**
 * send_bitmap_rle_or_plain() - send one bitmap packet, compressed or plain
 * @device:	DRBD device.
 * @c:		bitmap transfer context.
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
{
	struct drbd_socket *sock = &first_peer_device(device)->connection->data;
	unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
	struct p_compressed_bm *p = sock->sbuf + header_size;
	int len, err;

	len = fill_bitmap_rle_bits(device, p,
			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
	if (len < 0)
		return -EIO;

	if (len) {
		dcbp_set_code(p, RLE_VLI_Bits);
		err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
				     NULL, 0);
		c->packets[0]++;
		c->bytes[0] += header_size + sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)
			len = 0; /* DONE */
	} else {
		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		unsigned int data_size;
		unsigned long num_words;
		unsigned long *p = sock->sbuf + header_size;

		data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
		num_words = min_t(size_t, data_size / sizeof(*p),
				  c->bm_words - c->word_offset);
		len = num_words * sizeof(*p);
		if (len)
			drbd_bm_get_lel(device, c->word_offset, num_words, p);
		err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->packets[1]++;
		c->bytes[1] += header_size + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;
	}
	if (!err) {
		if (len == 0) {
			INFO_bm_xfer_stats(device, "send", c);
			return 0;
		} else
			return 1;
	}
	return -EIO;
}

/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_device *device)
{
	struct bm_xfer_ctx c;
	int err;

	if (!expect(device->bitmap))
		return false;

	if (get_ldev(device)) {
		if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
			drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(device);
			if (drbd_bm_write(device)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				drbd_err(device, "Failed to write bitmap to disk!\n");
			} else {
				drbd_md_clear_flag(device, MDF_FULL_SYNC);
				drbd_md_sync(device);
			}
		}
		put_ldev(device);
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(device),
		.bm_words = drbd_bm_words(device),
	};

	do {
		err = send_bitmap_rle_or_plain(device, &c);
	} while (err > 0);

	return err == 0;
}

int drbd_send_bitmap(struct drbd_device *device)
{
	struct drbd_socket *sock = &first_peer_device(device)->connection->data;
	int err = -1;

	mutex_lock(&sock->mutex);
	if (sock->socket)
		err = !_drbd_send_bitmap(device);
	mutex_unlock(&sock->mutex);
	return err;
}

void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
{
	struct drbd_socket *sock;
	struct p_barrier_ack *p;

	if (connection->cstate < C_WF_REPORT_PARAMS)
		return;

	sock = &connection->meta;
	p = conn_prepare_command(connection, sock);
	if (!p)
		return;
	p->barrier = barrier_nr;
	p->set_size = cpu_to_be32(set_size);
	conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}

/**
 * _drbd_send_ack() - Sends an ack packet
 * @peer_device:	DRBD peer device.
 * @cmd:	Packet command code.
 * @sector:	sector, needs to be in big endian byte order
 * @blksize:	size in bytes, needs to be in big endian byte order
 * @block_id:	Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
			  u64 sector, u32 blksize, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_ack *p;

	if (peer_device->device->state.conn < C_CONNECTED)
		return -EIO;

	sock = &peer_device->connection->meta;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = sector;
	p->block_id = block_id;
	p->blksize = blksize;
	p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}

/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		      struct p_data *dp, int data_size)
{
	if (peer_device->connection->peer_integrity_tfm)
		data_size -= crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
	_drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
		       dp->block_id);
}

void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		      struct p_block_req *rp)
{
	_drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id);
}

/**
 * drbd_send_ack() - Sends an ack packet
 * @peer_device:	DRBD peer device
 * @cmd:	packet command code
 * @peer_req:	peer request
 */
int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		  struct drbd_peer_request *peer_req)
{
	return _drbd_send_ack(peer_device, cmd,
			      cpu_to_be64(peer_req->i.sector),
			      cpu_to_be32(peer_req->i.size),
			      peer_req->block_id);
}
1409
1410 /* This function misuses the block_id field to signal if the blocks
1411  * are in sync or not. */
1412 int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1413                      sector_t sector, int blksize, u64 block_id)
1414 {
1415         return _drbd_send_ack(peer_device, cmd,
1416                               cpu_to_be64(sector),
1417                               cpu_to_be32(blksize),
1418                               cpu_to_be64(block_id));
1419 }
1420
1421 int drbd_send_rs_deallocated(struct drbd_peer_device *peer_device,
1422                              struct drbd_peer_request *peer_req)
1423 {
1424         struct drbd_socket *sock;
1425         struct p_block_desc *p;
1426
1427         sock = &peer_device->connection->data;
1428         p = drbd_prepare_command(peer_device, sock);
1429         if (!p)
1430                 return -EIO;
1431         p->sector = cpu_to_be64(peer_req->i.sector);
1432         p->blksize = cpu_to_be32(peer_req->i.size);
1433         p->pad = 0;
1434         return drbd_send_command(peer_device, sock, P_RS_DEALLOCATED, sizeof(*p), NULL, 0);
1435 }
1436
1437 int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
1438                        sector_t sector, int size, u64 block_id)
1439 {
1440         struct drbd_socket *sock;
1441         struct p_block_req *p;
1442
1443         sock = &peer_device->connection->data;
1444         p = drbd_prepare_command(peer_device, sock);
1445         if (!p)
1446                 return -EIO;
1447         p->sector = cpu_to_be64(sector);
1448         p->block_id = block_id;
1449         p->blksize = cpu_to_be32(size);
1450         return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
1451 }
1452
1453 int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size,
1454                             void *digest, int digest_size, enum drbd_packet cmd)
1455 {
1456         struct drbd_socket *sock;
1457         struct p_block_req *p;
1458
1459         /* FIXME: Put the digest into the preallocated socket buffer.  */
1460
1461         sock = &peer_device->connection->data;
1462         p = drbd_prepare_command(peer_device, sock);
1463         if (!p)
1464                 return -EIO;
1465         p->sector = cpu_to_be64(sector);
1466         p->block_id = ID_SYNCER /* unused */;
1467         p->blksize = cpu_to_be32(size);
1468         return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size);
1469 }
1470
1471 int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size)
1472 {
1473         struct drbd_socket *sock;
1474         struct p_block_req *p;
1475
1476         sock = &peer_device->connection->data;
1477         p = drbd_prepare_command(peer_device, sock);
1478         if (!p)
1479                 return -EIO;
1480         p->sector = cpu_to_be64(sector);
1481         p->block_id = ID_SYNCER /* unused */;
1482         p->blksize = cpu_to_be32(size);
1483         return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
1484 }
1485
1486 /* called on sndtimeo
1487  * returns false if we should retry,
1488  * true if we think connection is dead
1489  */
1490 static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
1491 {
1492         int drop_it;
1493         /* long elapsed = (long)(jiffies - device->last_received); */
1494
1495         drop_it =   connection->meta.socket == sock
1496                 || !connection->ack_receiver.task
1497                 || get_t_state(&connection->ack_receiver) != RUNNING
1498                 || connection->cstate < C_WF_REPORT_PARAMS;
1499
1500         if (drop_it)
1501                 return true;
1502
1503         drop_it = !--connection->ko_count;
1504         if (!drop_it) {
1505                 drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
1506                          current->comm, current->pid, connection->ko_count);
1507                 request_ping(connection);
1508         }
1509
1510         return drop_it; /* && (device->state == R_PRIMARY) */
1511 }
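
/*
 * Illustrative example (hypothetical numbers): with net_conf->ko_count
 * set to 7, each send timeout decrements connection->ko_count; while it
 * is still non-zero we log the expiry, request a ping and retry.  Only
 * when the counter reaches zero is the connection considered dead.
 * drbd_send() below reloads ko_count from net_conf before each send on
 * the data socket, so a successful send effectively resets the counter.
 */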
1512
1513 static void drbd_update_congested(struct drbd_connection *connection)
1514 {
1515         struct sock *sk = connection->data.socket->sk;
1516         if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
1517                 set_bit(NET_CONGESTED, &connection->flags);
1518 }
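
/*
 * Example with illustrative numbers: for sk_sndbuf = 128 KiB, NET_CONGESTED
 * is set once more than ~102 KiB (4/5 of the send buffer) are queued but
 * not yet sent.  The flag is cleared again in _drbd_send_page() and
 * drbd_send() once the send loop finishes.
 */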
1519
1520 /* The idea of sendpage seems to be to put some kind of reference
1521  * to the page into the skb, and to hand it over to the NIC. In
1522  * this process get_page() gets called.
1523  *
1524  * As soon as the page was really sent over the network put_page()
1525  * gets called by some part of the network layer. [ NIC driver? ]
1526  *
1527  * [ get_page() / put_page() increment/decrement the count. If count
1528  *   reaches 0 the page will be freed. ]
1529  *
1530  * This works nicely with pages from FSs.
1531  * But this means that in protocol A we might signal IO completion too early!
1532  *
1533  * In order not to corrupt data during a resync we must make sure
1534  * that we do not reuse our own buffer pages (EEs) too early, therefore
1535  * we have the net_ee list.
1536  *
1537  * XFS still seems to have problems: it submits pages with page_count == 0!
1538  * As a workaround, we disable sendpage on pages
1539  * with page_count == 0 or PageSlab.
1540  */
1541 static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
1542                               int offset, size_t size, unsigned msg_flags)
1543 {
1544         struct socket *socket;
1545         void *addr;
1546         int err;
1547
1548         socket = peer_device->connection->data.socket;
1549         addr = kmap(page) + offset;
1550         err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags);
1551         kunmap(page);
1552         if (!err)
1553                 peer_device->device->send_cnt += size >> 9;
1554         return err;
1555 }
1556
1557 static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
1558                     int offset, size_t size, unsigned msg_flags)
1559 {
1560         struct socket *socket = peer_device->connection->data.socket;
1561         int len = size;
1562         int err = -EIO;
1563
1564         /* e.g. XFS meta- & log-data is in slab pages, which have a
1565          * page_count of 0 and/or have PageSlab() set.
1566          * we cannot use send_page for those, as that does get_page();
1567          * put_page(); and would cause either a VM_BUG directly, or
1568          * __page_cache_release a page that would actually still be referenced
1569          * by someone, leading to some obscure delayed Oops somewhere else. */
1570         if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page))
1571                 return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
1572
1573         msg_flags |= MSG_NOSIGNAL;
1574         drbd_update_congested(peer_device->connection);
1575         do {
1576                 int sent;
1577
1578                 sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
1579                 if (sent <= 0) {
1580                         if (sent == -EAGAIN) {
1581                                 if (we_should_drop_the_connection(peer_device->connection, socket))
1582                                         break;
1583                                 continue;
1584                         }
1585                         drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
1586                              __func__, (int)size, len, sent);
1587                         if (sent < 0)
1588                                 err = sent;
1589                         break;
1590                 }
1591                 len    -= sent;
1592                 offset += sent;
1593         } while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
1594         clear_bit(NET_CONGESTED, &peer_device->connection->flags);
1595
1596         if (len == 0) {
1597                 err = 0;
1598                 peer_device->device->send_cnt += size >> 9;
1599         }
1600         return err;
1601 }
1602
1603 static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
1604 {
1605         struct bio_vec bvec;
1606         struct bvec_iter iter;
1607
1608         /* hint all but last page with MSG_MORE */
1609         bio_for_each_segment(bvec, bio, iter) {
1610                 int err;
1611
1612                 err = _drbd_no_send_page(peer_device, bvec.bv_page,
1613                                          bvec.bv_offset, bvec.bv_len,
1614                                          bio_iter_last(bvec, iter)
1615                                          ? 0 : MSG_MORE);
1616                 if (err)
1617                         return err;
1618                 /* REQ_OP_WRITE_SAME has only one segment */
1619                 if (bio_op(bio) == REQ_OP_WRITE_SAME)
1620                         break;
1621         }
1622         return 0;
1623 }
1624
1625 static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
1626 {
1627         struct bio_vec bvec;
1628         struct bvec_iter iter;
1629
1630         /* hint all but last page with MSG_MORE */
1631         bio_for_each_segment(bvec, bio, iter) {
1632                 int err;
1633
1634                 err = _drbd_send_page(peer_device, bvec.bv_page,
1635                                       bvec.bv_offset, bvec.bv_len,
1636                                       bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
1637                 if (err)
1638                         return err;
1639                 /* REQ_OP_WRITE_SAME has only one segment */
1640                 if (bio_op(bio) == REQ_OP_WRITE_SAME)
1641                         break;
1642         }
1643         return 0;
1644 }
1645
1646 static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
1647                             struct drbd_peer_request *peer_req)
1648 {
1649         struct page *page = peer_req->pages;
1650         unsigned len = peer_req->i.size;
1651         int err;
1652
1653         /* hint all but last page with MSG_MORE */
1654         page_chain_for_each(page) {
1655                 unsigned l = min_t(unsigned, len, PAGE_SIZE);
1656
1657                 err = _drbd_send_page(peer_device, page, 0, l,
1658                                       page_chain_next(page) ? MSG_MORE : 0);
1659                 if (err)
1660                         return err;
1661                 len -= l;
1662         }
1663         return 0;
1664 }
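
/*
 * Illustrative walk-through: for a 12 KiB peer request on 4 KiB pages the
 * chain above holds three pages; the first two are sent with MSG_MORE set
 * and the last one without, hinting to TCP that it may coalesce them into
 * fewer segments.
 */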
1665
1666 static u32 bio_flags_to_wire(struct drbd_connection *connection,
1667                              struct bio *bio)
1668 {
1669         if (connection->agreed_pro_version >= 95)
1670                 return  (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
1671                         (bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
1672                         (bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
1673                         (bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
1674                         (bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0) |
1675                         (bio_op(bio) == REQ_OP_WRITE_ZEROES ? DP_DISCARD : 0);
1676         else
1677                 return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
1678 }
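
/*
 * Example (derived from the mapping above): with agreed_pro_version >= 95,
 * a bio carrying REQ_SYNC | REQ_FUA is announced as DP_RW_SYNC | DP_FUA on
 * the wire.  Note that both REQ_OP_DISCARD and REQ_OP_WRITE_ZEROES map to
 * DP_DISCARD, so the peer cannot distinguish the two.
 */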
1679
1680 /* Used to send write or TRIM (REQ_OP_DISCARD) requests
1681  * R_PRIMARY -> Peer    (P_DATA, P_TRIM)
1682  */
1683 int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
1684 {
1685         struct drbd_device *device = peer_device->device;
1686         struct drbd_socket *sock;
1687         struct p_data *p;
1688         struct p_wsame *wsame = NULL;
1689         void *digest_out;
1690         unsigned int dp_flags = 0;
1691         int digest_size;
1692         int err;
1693
1694         sock = &peer_device->connection->data;
1695         p = drbd_prepare_command(peer_device, sock);
1696         digest_size = peer_device->connection->integrity_tfm ?
1697                       crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
1698
1699         if (!p)
1700                 return -EIO;
1701         p->sector = cpu_to_be64(req->i.sector);
1702         p->block_id = (unsigned long)req;
1703         p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
1704         dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio);
1705         if (device->state.conn >= C_SYNC_SOURCE &&
1706             device->state.conn <= C_PAUSED_SYNC_T)
1707                 dp_flags |= DP_MAY_SET_IN_SYNC;
1708         if (peer_device->connection->agreed_pro_version >= 100) {
1709                 if (req->rq_state & RQ_EXP_RECEIVE_ACK)
1710                         dp_flags |= DP_SEND_RECEIVE_ACK;
1711                 /* During resync, request an explicit write ack,
1712                  * even in protocol != C */
1713                 if (req->rq_state & RQ_EXP_WRITE_ACK
1714                 || (dp_flags & DP_MAY_SET_IN_SYNC))
1715                         dp_flags |= DP_SEND_WRITE_ACK;
1716         }
1717         p->dp_flags = cpu_to_be32(dp_flags);
1718
1719         if (dp_flags & DP_DISCARD) {
1720                 struct p_trim *t = (struct p_trim *)p;
1721                 t->size = cpu_to_be32(req->i.size);
1722                 err = __send_command(peer_device->connection, device->vnr, sock, P_TRIM, sizeof(*t), NULL, 0);
1723                 goto out;
1724         }
1725         if (dp_flags & DP_WSAME) {
1726                 /* this will only work if DRBD_FF_WSAME is set AND the
1727                  * handshake agreed that all nodes and backend devices are
1728                  * WRITE_SAME capable and agree on logical_block_size */
1729                 wsame = (struct p_wsame *)p;
1730                 digest_out = wsame + 1;
1731                 wsame->size = cpu_to_be32(req->i.size);
1732         } else
1733                 digest_out = p + 1;
1734
1735         /* our digest is still only over the payload.
1736          * TRIM does not carry any payload. */
1737         if (digest_size)
1738                 drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest_out);
1739         if (wsame) {
1740                 err =
1741                     __send_command(peer_device->connection, device->vnr, sock, P_WSAME,
1742                                    sizeof(*wsame) + digest_size, NULL,
1743                                    bio_iovec(req->master_bio).bv_len);
1744         } else
1745                 err =
1746                     __send_command(peer_device->connection, device->vnr, sock, P_DATA,
1747                                    sizeof(*p) + digest_size, NULL, req->i.size);
1748         if (!err) {
1749                 /* For protocol A, we have to memcpy the payload into
1750                  * socket buffers, as we may complete right away
1751                  * as soon as we handed it over to tcp, at which point the data
1752                  * pages may become invalid.
1753                  *
1754                  * For data-integrity enabled, we copy it as well, so we can be
1755                  * sure that even if the bio pages may still be modified, it
1756                  * won't change the data on the wire, thus if the digest checks
1757                  * out ok after sending on this side, but does not fit on the
1758                  * receiving side, we sure have detected corruption elsewhere.
1759                  */
1760                 if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || digest_size)
1761                         err = _drbd_send_bio(peer_device, req->master_bio);
1762                 else
1763                         err = _drbd_send_zc_bio(peer_device, req->master_bio);
1764
1765                 /* double check digest, sometimes buffers have been modified in flight. */
1766                 if (digest_size > 0 && digest_size <= 64) {
1767                         /* 64 byte, 512 bit, is the largest digest size
1768                          * currently supported in kernel crypto. */
1769                         unsigned char digest[64];
1770                         drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
1771                         if (memcmp(p + 1, digest, digest_size)) {
1772                                 drbd_warn(device,
1773                                         "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
1774                                         (unsigned long long)req->i.sector, req->i.size);
1775                         }
1776                 } /* else if (digest_size > 64) {
1777                      ... Be noisy about digest too large ...
1778                 } */
1779         }
1780 out:
1781         mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */
1782
1783         return err;
1784 }
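
/*
 * Payload path summary for drbd_send_dblock() above: the bio data is
 * copied into the socket buffers (_drbd_send_bio) whenever no ack is
 * expected (protocol A) or a data integrity digest is in use; only when
 * an ack is expected and no digest is configured is the zero-copy
 * sendpage path (_drbd_send_zc_bio) safe to use.
 */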
1785
1786 /* answer packet, used to send data back for read requests:
1787  *  Peer       -> (diskless) R_PRIMARY   (P_DATA_REPLY)
1788  *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
1789  */
1790 int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1791                     struct drbd_peer_request *peer_req)
1792 {
1793         struct drbd_device *device = peer_device->device;
1794         struct drbd_socket *sock;
1795         struct p_data *p;
1796         int err;
1797         int digest_size;
1798
1799         sock = &peer_device->connection->data;
1800         p = drbd_prepare_command(peer_device, sock);
1801
1802         digest_size = peer_device->connection->integrity_tfm ?
1803                       crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
1804
1805         if (!p)
1806                 return -EIO;
1807         p->sector = cpu_to_be64(peer_req->i.sector);
1808         p->block_id = peer_req->block_id;
1809         p->seq_num = 0;  /* unused */
1810         p->dp_flags = 0;
1811         if (digest_size)
1812                 drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
1813         err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NULL, peer_req->i.size);
1814         if (!err)
1815                 err = _drbd_send_zc_ee(peer_device, peer_req);
1816         mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */
1817
1818         return err;
1819 }
1820
1821 int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
1822 {
1823         struct drbd_socket *sock;
1824         struct p_block_desc *p;
1825
1826         sock = &peer_device->connection->data;
1827         p = drbd_prepare_command(peer_device, sock);
1828         if (!p)
1829                 return -EIO;
1830         p->sector = cpu_to_be64(req->i.sector);
1831         p->blksize = cpu_to_be32(req->i.size);
1832         return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
1833 }
1834
1835 /*
1836   drbd_send distinguishes two cases:
1837
1838   Packets sent via the data socket "sock"
1839   and packets sent via the meta data socket "msock"
1840
1841                     sock                      msock
1842   -----------------+-------------------------+------------------------------
1843   timeout           conf.timeout / 2          conf.timeout / 2
1844   timeout action    send a ping via msock     Abort communication
1845                                               and close all sockets
1846 */
1847
1848 /*
1849  * The caller must already hold the appropriate [m]sock mutex!
1850  */
1851 int drbd_send(struct drbd_connection *connection, struct socket *sock,
1852               void *buf, size_t size, unsigned msg_flags)
1853 {
1854         struct kvec iov = {.iov_base = buf, .iov_len = size};
1855         struct msghdr msg = {.msg_flags = msg_flags | MSG_NOSIGNAL};
1856         int rv, sent = 0;
1857
1858         if (!sock)
1859                 return -EBADR;
1860
1861         /* THINK  if (signal_pending) return ... ? */
1862
1863         iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
1864
1865         if (sock == connection->data.socket) {
1866                 rcu_read_lock();
1867                 connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
1868                 rcu_read_unlock();
1869                 drbd_update_congested(connection);
1870         }
1871         do {
1872                 rv = sock_sendmsg(sock, &msg);
1873                 if (rv == -EAGAIN) {
1874                         if (we_should_drop_the_connection(connection, sock))
1875                                 break;
1876                         else
1877                                 continue;
1878                 }
1879                 if (rv == -EINTR) {
1880                         flush_signals(current);
1881                         rv = 0;
1882                 }
1883                 if (rv < 0)
1884                         break;
1885                 sent += rv;
1886         } while (sent < size);
1887
1888         if (sock == connection->data.socket)
1889                 clear_bit(NET_CONGESTED, &connection->flags);
1890
1891         if (rv <= 0) {
1892                 if (rv != -EAGAIN) {
1893                         drbd_err(connection, "%s_sendmsg returned %d\n",
1894                                  sock == connection->meta.socket ? "msock" : "sock",
1895                                  rv);
1896                         conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
1897                 } else
1898                         conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
1899         }
1900
1901         return sent;
1902 }
1903
1904 /**
1905  * drbd_send_all  -  Send an entire buffer
1906  *
1907  * Returns 0 upon success and a negative error value otherwise.
1908  */
1909 int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
1910                   size_t size, unsigned msg_flags)
1911 {
1912         int err;
1913
1914         err = drbd_send(connection, sock, buffer, size, msg_flags);
1915         if (err < 0)
1916                 return err;
1917         if (err != size)
1918                 return -EIO;
1919         return 0;
1920 }
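
/*
 * Usage sketch (illustrative only; "hdr" and "payload" are hypothetical
 * buffers, not part of this file):
 *
 *	err = drbd_send_all(connection, sock, &hdr, sizeof(hdr), MSG_MORE);
 *	if (!err)
 *		err = drbd_send_all(connection, sock, payload, len, 0);
 *
 * MSG_MORE hints that more data follows, allowing TCP to coalesce header
 * and payload into a single segment where possible.
 */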
1921
1922 static int drbd_open(struct block_device *bdev, fmode_t mode)
1923 {
1924         struct drbd_device *device = bdev->bd_disk->private_data;
1925         unsigned long flags;
1926         int rv = 0;
1927
1928         mutex_lock(&drbd_main_mutex);
1929         spin_lock_irqsave(&device->resource->req_lock, flags);
1930         /* to have a stable device->state.role
1931          * and no race with updating open_cnt */
1932
1933         if (device->state.role != R_PRIMARY) {
1934                 if (mode & FMODE_WRITE)
1935                         rv = -EROFS;
1936                 else if (!drbd_allow_oos)
1937                         rv = -EMEDIUMTYPE;
1938         }
1939
1940         if (!rv)
1941                 device->open_cnt++;
1942         spin_unlock_irqrestore(&device->resource->req_lock, flags);
1943         mutex_unlock(&drbd_main_mutex);
1944
1945         return rv;
1946 }
1947
1948 static void drbd_release(struct gendisk *gd, fmode_t mode)
1949 {
1950         struct drbd_device *device = gd->private_data;
1951         mutex_lock(&drbd_main_mutex);
1952         device->open_cnt--;
1953         mutex_unlock(&drbd_main_mutex);
1954 }
1955
1956 /* need to hold resource->req_lock */
1957 void drbd_queue_unplug(struct drbd_device *device)
1958 {
1959         if (device->state.pdsk >= D_INCONSISTENT && device->state.conn >= C_CONNECTED) {
1960                 D_ASSERT(device, device->state.role == R_PRIMARY);
1961                 if (test_and_clear_bit(UNPLUG_REMOTE, &device->flags)) {
1962                         drbd_queue_work_if_unqueued(
1963                                 &first_peer_device(device)->connection->sender_work,
1964                                 &device->unplug_work);
1965                 }
1966         }
1967 }
1968
1969 static void drbd_set_defaults(struct drbd_device *device)
1970 {
1971         /* Beware! The actual layout differs
1972          * between big endian and little endian */
1973         device->state = (union drbd_dev_state) {
1974                 { .role = R_SECONDARY,
1975                   .peer = R_UNKNOWN,
1976                   .conn = C_STANDALONE,
1977                   .disk = D_DISKLESS,
1978                   .pdsk = D_UNKNOWN,
1979                 } };
1980 }
1981
1982 void drbd_init_set_defaults(struct drbd_device *device)
1983 {
1984         /* the memset(,0,) did most of this.
1985          * note: only assignments, no allocation in here */
1986
1987         drbd_set_defaults(device);
1988
1989         atomic_set(&device->ap_bio_cnt, 0);
1990         atomic_set(&device->ap_actlog_cnt, 0);
1991         atomic_set(&device->ap_pending_cnt, 0);
1992         atomic_set(&device->rs_pending_cnt, 0);
1993         atomic_set(&device->unacked_cnt, 0);
1994         atomic_set(&device->local_cnt, 0);
1995         atomic_set(&device->pp_in_use_by_net, 0);
1996         atomic_set(&device->rs_sect_in, 0);
1997         atomic_set(&device->rs_sect_ev, 0);
1998         atomic_set(&device->ap_in_flight, 0);
1999         atomic_set(&device->md_io.in_use, 0);
2000
2001         mutex_init(&device->own_state_mutex);
2002         device->state_mutex = &device->own_state_mutex;
2003
2004         spin_lock_init(&device->al_lock);
2005         spin_lock_init(&device->peer_seq_lock);
2006
2007         INIT_LIST_HEAD(&device->active_ee);
2008         INIT_LIST_HEAD(&device->sync_ee);
2009         INIT_LIST_HEAD(&device->done_ee);
2010         INIT_LIST_HEAD(&device->read_ee);
2011         INIT_LIST_HEAD(&device->net_ee);
2012         INIT_LIST_HEAD(&device->resync_reads);
2013         INIT_LIST_HEAD(&device->resync_work.list);
2014         INIT_LIST_HEAD(&device->unplug_work.list);
2015         INIT_LIST_HEAD(&device->bm_io_work.w.list);
2016         INIT_LIST_HEAD(&device->pending_master_completion[0]);
2017         INIT_LIST_HEAD(&device->pending_master_completion[1]);
2018         INIT_LIST_HEAD(&device->pending_completion[0]);
2019         INIT_LIST_HEAD(&device->pending_completion[1]);
2020
2021         device->resync_work.cb  = w_resync_timer;
2022         device->unplug_work.cb  = w_send_write_hint;
2023         device->bm_io_work.w.cb = w_bitmap_io;
2024
2025         timer_setup(&device->resync_timer, resync_timer_fn, 0);
2026         timer_setup(&device->md_sync_timer, md_sync_timer_fn, 0);
2027         timer_setup(&device->start_resync_timer, start_resync_timer_fn, 0);
2028         timer_setup(&device->request_timer, request_timer_fn, 0);
2029
2030         init_waitqueue_head(&device->misc_wait);
2031         init_waitqueue_head(&device->state_wait);
2032         init_waitqueue_head(&device->ee_wait);
2033         init_waitqueue_head(&device->al_wait);
2034         init_waitqueue_head(&device->seq_wait);
2035
2036         device->resync_wenr = LC_FREE;
2037         device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2038         device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2039 }
2040
2041 void drbd_device_cleanup(struct drbd_device *device)
2042 {
2043         int i;
2044         if (first_peer_device(device)->connection->receiver.t_state != NONE)
2045                 drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
2046                                 first_peer_device(device)->connection->receiver.t_state);
2047
2048         device->al_writ_cnt  =
2049         device->bm_writ_cnt  =
2050         device->read_cnt     =
2051         device->recv_cnt     =
2052         device->send_cnt     =
2053         device->writ_cnt     =
2054         device->p_size       =
2055         device->rs_start     =
2056         device->rs_total     =
2057         device->rs_failed    = 0;
2058         device->rs_last_events = 0;
2059         device->rs_last_sect_ev = 0;
2060         for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2061                 device->rs_mark_left[i] = 0;
2062                 device->rs_mark_time[i] = 0;
2063         }
2064         D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
2065
2066         drbd_set_my_capacity(device, 0);
2067         if (device->bitmap) {
2068                 /* maybe never allocated. */
2069                 drbd_bm_resize(device, 0, 1);
2070                 drbd_bm_cleanup(device);
2071         }
2072
2073         drbd_backing_dev_free(device, device->ldev);
2074         device->ldev = NULL;
2075
2076         clear_bit(AL_SUSPENDED, &device->flags);
2077
2078         D_ASSERT(device, list_empty(&device->active_ee));
2079         D_ASSERT(device, list_empty(&device->sync_ee));
2080         D_ASSERT(device, list_empty(&device->done_ee));
2081         D_ASSERT(device, list_empty(&device->read_ee));
2082         D_ASSERT(device, list_empty(&device->net_ee));
2083         D_ASSERT(device, list_empty(&device->resync_reads));
2084         D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
2085         D_ASSERT(device, list_empty(&device->resync_work.list));
2086         D_ASSERT(device, list_empty(&device->unplug_work.list));
2087
2088         drbd_set_defaults(device);
2089 }
2090
2091
2092 static void drbd_destroy_mempools(void)
2093 {
2094         struct page *page;
2095
2096         while (drbd_pp_pool) {
2097                 page = drbd_pp_pool;
2098                 drbd_pp_pool = (struct page *)page_private(page);
2099                 __free_page(page);
2100                 drbd_pp_vacant--;
2101         }
2102
2103         /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
2104
2105         bioset_exit(&drbd_io_bio_set);
2106         bioset_exit(&drbd_md_io_bio_set);
2107         mempool_exit(&drbd_md_io_page_pool);
2108         mempool_exit(&drbd_ee_mempool);
2109         mempool_exit(&drbd_request_mempool);
2110         kmem_cache_destroy(drbd_ee_cache);
2111         kmem_cache_destroy(drbd_request_cache);
2112         kmem_cache_destroy(drbd_bm_ext_cache);
2113         kmem_cache_destroy(drbd_al_ext_cache);
2114
2115         drbd_ee_cache        = NULL;
2116         drbd_request_cache   = NULL;
2117         drbd_bm_ext_cache    = NULL;
2118         drbd_al_ext_cache    = NULL;
2121 }
2122
2123 static int drbd_create_mempools(void)
2124 {
2125         struct page *page;
2126         const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
2127         int i, ret;
2128
2129         /* caches */
2130         drbd_request_cache = kmem_cache_create(
2131                 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2132         if (drbd_request_cache == NULL)
2133                 goto Enomem;
2134
2135         drbd_ee_cache = kmem_cache_create(
2136                 "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
2137         if (drbd_ee_cache == NULL)
2138                 goto Enomem;
2139
2140         drbd_bm_ext_cache = kmem_cache_create(
2141                 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2142         if (drbd_bm_ext_cache == NULL)
2143                 goto Enomem;
2144
2145         drbd_al_ext_cache = kmem_cache_create(
2146                 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2147         if (drbd_al_ext_cache == NULL)
2148                 goto Enomem;
2149
2150         /* mempools */
2151         ret = bioset_init(&drbd_io_bio_set, BIO_POOL_SIZE, 0, 0);
2152         if (ret)
2153                 goto Enomem;
2154
2155         ret = bioset_init(&drbd_md_io_bio_set, DRBD_MIN_POOL_PAGES, 0,
2156                           BIOSET_NEED_BVECS);
2157         if (ret)
2158                 goto Enomem;
2159
2160         ret = mempool_init_page_pool(&drbd_md_io_page_pool, DRBD_MIN_POOL_PAGES, 0);
2161         if (ret)
2162                 goto Enomem;
2163
2164         ret = mempool_init_slab_pool(&drbd_request_mempool, number,
2165                                      drbd_request_cache);
2166         if (ret)
2167                 goto Enomem;
2168
2169         ret = mempool_init_slab_pool(&drbd_ee_mempool, number, drbd_ee_cache);
2170         if (ret)
2171                 goto Enomem;
2172
2173         /* drbd's page pool */
2174         spin_lock_init(&drbd_pp_lock);
2175
2176         for (i = 0; i < number; i++) {
2177                 page = alloc_page(GFP_HIGHUSER);
2178                 if (!page)
2179                         goto Enomem;
2180                 set_page_private(page, (unsigned long)drbd_pp_pool);
2181                 drbd_pp_pool = page;
2182         }
2183         drbd_pp_vacant = number;
2184
2185         return 0;
2186
2187 Enomem:
2188         drbd_destroy_mempools(); /* in case we allocated some */
2189         return -ENOMEM;
2190 }
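
/*
 * The private page pool above chains pages through their page_private
 * field.  A minimal pop operation, assuming drbd_pp_lock is held, would
 * look like this (sketch only; the real allocator lives in drbd_receiver.c):
 *
 *	page = drbd_pp_pool;
 *	if (page) {
 *		drbd_pp_pool = (struct page *)page_private(page);
 *		set_page_private(page, 0);
 *		drbd_pp_vacant--;
 *	}
 */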
2191
2192 static void drbd_release_all_peer_reqs(struct drbd_device *device)
2193 {
2194         int rr;
2195
2196         rr = drbd_free_peer_reqs(device, &device->active_ee);
2197         if (rr)
2198                 drbd_err(device, "%d EEs in active list found!\n", rr);
2199
2200         rr = drbd_free_peer_reqs(device, &device->sync_ee);
2201         if (rr)
2202                 drbd_err(device, "%d EEs in sync list found!\n", rr);
2203
2204         rr = drbd_free_peer_reqs(device, &device->read_ee);
2205         if (rr)
2206                 drbd_err(device, "%d EEs in read list found!\n", rr);
2207
2208         rr = drbd_free_peer_reqs(device, &device->done_ee);
2209         if (rr)
2210                 drbd_err(device, "%d EEs in done list found!\n", rr);
2211
2212         rr = drbd_free_peer_reqs(device, &device->net_ee);
2213         if (rr)
2214                 drbd_err(device, "%d EEs in net list found!\n", rr);
2215 }
2216
2217 /* caution. no locking. */
2218 void drbd_destroy_device(struct kref *kref)
2219 {
2220         struct drbd_device *device = container_of(kref, struct drbd_device, kref);
2221         struct drbd_resource *resource = device->resource;
2222         struct drbd_peer_device *peer_device, *tmp_peer_device;
2223
2224         del_timer_sync(&device->request_timer);
2225
2226         /* paranoia asserts */
2227         D_ASSERT(device, device->open_cnt == 0);
2228         /* end paranoia asserts */
2229
2230         /* cleanup stuff that may have been allocated during
2231          * device (re-)configuration or state changes */
2232
2233         if (device->this_bdev)
2234                 bdput(device->this_bdev);
2235
2236         drbd_backing_dev_free(device, device->ldev);
2237         device->ldev = NULL;
2238
2239         drbd_release_all_peer_reqs(device);
2240
2241         lc_destroy(device->act_log);
2242         lc_destroy(device->resync);
2243
2244         kfree(device->p_uuid);
2245         /* device->p_uuid = NULL; */
2246
2247         if (device->bitmap) /* should no longer be there. */
2248                 drbd_bm_cleanup(device);
2249         __free_page(device->md_io.page);
2250         put_disk(device->vdisk);
2251         blk_cleanup_queue(device->rq_queue);
2252         kfree(device->rs_plan_s);
2253
2254         /* not for_each_connection(connection, resource):
2255          * those may have been cleaned up and disassociated already.
2256          */
2257         for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2258                 kref_put(&peer_device->connection->kref, drbd_destroy_connection);
2259                 kfree(peer_device);
2260         }
2261         memset(device, 0xfd, sizeof(*device));
2262         kfree(device);
2263         kref_put(&resource->kref, drbd_destroy_resource);
2264 }
2265
2266 /* One global retry thread, if we need to push back some bio and have it
2267  * reinserted through our make request function.
2268  */
2269 static struct retry_worker {
2270         struct workqueue_struct *wq;
2271         struct work_struct worker;
2272
2273         spinlock_t lock;
2274         struct list_head writes;
2275 } retry;
2276
2277 static void do_retry(struct work_struct *ws)
2278 {
2279         struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
2280         LIST_HEAD(writes);
2281         struct drbd_request *req, *tmp;
2282
2283         spin_lock_irq(&retry->lock);
2284         list_splice_init(&retry->writes, &writes);
2285         spin_unlock_irq(&retry->lock);
2286
2287         list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
2288                 struct drbd_device *device = req->device;
2289                 struct bio *bio = req->master_bio;
2290                 unsigned long start_jif = req->start_jif;
2291                 bool expected;
2292
2293                 expected =
2294                         expect(atomic_read(&req->completion_ref) == 0) &&
2295                         expect(req->rq_state & RQ_POSTPONED) &&
2296                         expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
2297                                 (req->rq_state & RQ_LOCAL_ABORTED) != 0);
2298
2299                 if (!expected)
2300                         drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
2301                                 req, atomic_read(&req->completion_ref),
2302                                 req->rq_state);
2303
2304                 /* We still need to put one kref associated with the
2305                  * "completion_ref" going zero in the code path that queued it
2306                  * here.  The request object may still be referenced by a
2307                  * frozen local req->private_bio, in case we force-detached.
2308                  */
2309                 kref_put(&req->kref, drbd_req_destroy);
2310
2311                 /* A single suspended or otherwise blocking device may stall
2312                  * all others as well.  Fortunately, this code path is to
2313                  * recover from a situation that "should not happen":
2314                  * concurrent writes in multi-primary setup.
2315                  * In a "normal" lifecycle, this workqueue is supposed to be
2316                  * destroyed without ever doing anything.
2317                  * If it turns out to be an issue anyways, we can do per
2318                  * resource (replication group) or per device (minor) retry
2319                  * workqueues instead.
2320                  */
2321
2322                 /* We are not just doing generic_make_request(),
2323                  * as we want to keep the start_time information. */
2324                 inc_ap_bio(device);
2325                 __drbd_make_request(device, bio, start_jif);
2326         }
2327 }
2328
2329 /* called via drbd_req_put_completion_ref(),
2330  * holds resource->req_lock */
2331 void drbd_restart_request(struct drbd_request *req)
2332 {
2333         unsigned long flags;
2334         spin_lock_irqsave(&retry.lock, flags);
2335         list_move_tail(&req->tl_requests, &retry.writes);
2336         spin_unlock_irqrestore(&retry.lock, flags);
2337
2338         /* Drop the extra reference that would otherwise
2339          * have been dropped by complete_master_bio.
2340          * do_retry() needs to grab a new one. */
2341         dec_ap_bio(req->device);
2342
2343         queue_work(retry.wq, &retry.worker);
2344 }
2345
2346 void drbd_destroy_resource(struct kref *kref)
2347 {
2348         struct drbd_resource *resource =
2349                 container_of(kref, struct drbd_resource, kref);
2350
2351         idr_destroy(&resource->devices);
2352         free_cpumask_var(resource->cpu_mask);
2353         kfree(resource->name);
2354         memset(resource, 0xf2, sizeof(*resource));
2355         kfree(resource);
2356 }
2357
2358 void drbd_free_resource(struct drbd_resource *resource)
2359 {
2360         struct drbd_connection *connection, *tmp;
2361
2362         for_each_connection_safe(connection, tmp, resource) {
2363                 list_del(&connection->connections);
2364                 drbd_debugfs_connection_cleanup(connection);
2365                 kref_put(&connection->kref, drbd_destroy_connection);
2366         }
2367         drbd_debugfs_resource_cleanup(resource);
2368         kref_put(&resource->kref, drbd_destroy_resource);
2369 }
2370
2371 static void drbd_cleanup(void)
2372 {
2373         unsigned int i;
2374         struct drbd_device *device;
2375         struct drbd_resource *resource, *tmp;
2376
2377         /* first remove proc,
2378  * drbdsetup uses its presence to detect
2379  * whether DRBD is loaded.
2380  * If we got stuck in proc removal
2381  * with netlink already deregistered,
2382  * some drbdsetup commands could wait forever
2383  * for an answer.
2384          */
2385         if (drbd_proc)
2386                 remove_proc_entry("drbd", NULL);
2387
2388         if (retry.wq)
2389                 destroy_workqueue(retry.wq);
2390
2391         drbd_genl_unregister();
2392
2393         idr_for_each_entry(&drbd_devices, device, i)
2394                 drbd_delete_device(device);
2395
2396         /* not _rcu since there is no other updater anymore; genl already unregistered */
2397         for_each_resource_safe(resource, tmp, &drbd_resources) {
2398                 list_del(&resource->resources);
2399                 drbd_free_resource(resource);
2400         }
2401
2402         drbd_debugfs_cleanup();
2403
2404         drbd_destroy_mempools();
2405         unregister_blkdev(DRBD_MAJOR, "drbd");
2406
2407         idr_destroy(&drbd_devices);
2408
2409         pr_info("module cleanup done.\n");
2410 }
2411
2412 /**
2413  * drbd_congested() - Callback for the flusher thread
2414  * @congested_data:     User data
2415  * @bdi_bits:           Bits the BDI flusher thread is currently interested in
2416  *
2417  * Returns 1<<WB_async_congested and/or 1<<WB_sync_congested if we are congested.
2418  */
2419 static int drbd_congested(void *congested_data, int bdi_bits)
2420 {
2421         struct drbd_device *device = congested_data;
2422         struct request_queue *q;
2423         char reason = '-';
2424         int r = 0;
2425
2426         if (!may_inc_ap_bio(device)) {
2427                 /* DRBD has frozen IO */
2428                 r = bdi_bits;
2429                 reason = 'd';
2430                 goto out;
2431         }
2432
2433         if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
2434                 r |= (1 << WB_async_congested);
2435                 /* Without good local data, we would need to read from remote,
2436                  * and that would need the worker thread as well, which is
2437                  * currently blocked waiting for that usermode helper to
2438                  * finish.
2439                  */
2440                 if (!get_ldev_if_state(device, D_UP_TO_DATE))
2441                         r |= (1 << WB_sync_congested);
2442                 else
2443                         put_ldev(device);
2444                 r &= bdi_bits;
2445                 reason = 'c';
2446                 goto out;
2447         }
2448
2449         if (get_ldev(device)) {
2450                 q = bdev_get_queue(device->ldev->backing_bdev);
2451                 r = bdi_congested(q->backing_dev_info, bdi_bits);
2452                 put_ldev(device);
2453                 if (r)
2454                         reason = 'b';
2455         }
2456
2457         if (bdi_bits & (1 << WB_async_congested) &&
2458             test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
2459                 r |= (1 << WB_async_congested);
2460                 reason = reason == 'b' ? 'a' : 'n';
2461         }
2462
2463 out:
2464         device->congestion_reason = reason;
2465         return r;
2466 }
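
/*
 * Legend for congestion_reason as set above: 'd' = DRBD has frozen IO,
 * 'c' = blocked on a pending usermode helper callback, 'b' = the local
 * backing device is congested, 'n' = the network socket is congested,
 * 'a' = both backing device and network are congested, '-' = not
 * congested.
 */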
2467
2468 static void drbd_init_workqueue(struct drbd_work_queue* wq)
2469 {
2470         spin_lock_init(&wq->q_lock);
2471         INIT_LIST_HEAD(&wq->q);
2472         init_waitqueue_head(&wq->q_wait);
2473 }
2474
2475 struct completion_work {
2476         struct drbd_work w;
2477         struct completion done;
2478 };
2479
2480 static int w_complete(struct drbd_work *w, int cancel)
2481 {
2482         struct completion_work *completion_work =
2483                 container_of(w, struct completion_work, w);
2484
2485         complete(&completion_work->done);
2486         return 0;
2487 }
2488
2489 void drbd_flush_workqueue(struct drbd_work_queue *work_queue)
2490 {
2491         struct completion_work completion_work;
2492
2493         completion_work.w.cb = w_complete;
2494         init_completion(&completion_work.done);
2495         drbd_queue_work(work_queue, &completion_work.w);
2496         wait_for_completion(&completion_work.done);
2497 }
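
/*
 * Example (illustrative): drbd_flush_workqueue(&connection->sender_work)
 * queues a w_complete work item and blocks until the worker thread has run
 * it; since the queue is processed in order, this guarantees that every
 * work item queued before the call has completed as well.
 */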
2498
2499 struct drbd_resource *drbd_find_resource(const char *name)
2500 {
2501         struct drbd_resource *resource;
2502
2503         if (!name || !name[0])
2504                 return NULL;
2505
2506         rcu_read_lock();
2507         for_each_resource_rcu(resource, &drbd_resources) {
2508                 if (!strcmp(resource->name, name)) {
2509                         kref_get(&resource->kref);
2510                         goto found;
2511                 }
2512         }
2513         resource = NULL;
2514 found:
2515         rcu_read_unlock();
2516         return resource;
2517 }
2518
2519 struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
2520                                      void *peer_addr, int peer_addr_len)
2521 {
2522         struct drbd_resource *resource;
2523         struct drbd_connection *connection;
2524
2525         rcu_read_lock();
2526         for_each_resource_rcu(resource, &drbd_resources) {
2527                 for_each_connection_rcu(connection, resource) {
2528                         if (connection->my_addr_len == my_addr_len &&
2529                             connection->peer_addr_len == peer_addr_len &&
2530                             !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
2531                             !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
2532                                 kref_get(&connection->kref);
2533                                 goto found;
2534                         }
2535                 }
2536         }
2537         connection = NULL;
2538 found:
2539         rcu_read_unlock();
2540         return connection;
2541 }
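
/*
 * Note on reference counting: both drbd_find_resource() and
 * conn_get_by_addrs() take a kref on the object they return, so the
 * caller must drop it again, e.g.:
 *
 *	connection = conn_get_by_addrs(my_addr, my_addr_len,
 *				       peer_addr, peer_addr_len);
 *	if (connection) {
 *		...
 *		kref_put(&connection->kref, drbd_destroy_connection);
 *	}
 */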
2542
2543 static int drbd_alloc_socket(struct drbd_socket *socket)
2544 {
2545         socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
2546         if (!socket->rbuf)
2547                 return -ENOMEM;
2548         socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
2549         if (!socket->sbuf)
2550                 return -ENOMEM;
2551         return 0;
2552 }
2553
2554 static void drbd_free_socket(struct drbd_socket *socket)
2555 {
2556         free_page((unsigned long) socket->sbuf);
2557         free_page((unsigned long) socket->rbuf);
2558 }
2559
2560 void conn_free_crypto(struct drbd_connection *connection)
2561 {
2562         drbd_free_sock(connection);
2563
2564         crypto_free_ahash(connection->csums_tfm);
2565         crypto_free_ahash(connection->verify_tfm);
2566         crypto_free_shash(connection->cram_hmac_tfm);
2567         crypto_free_ahash(connection->integrity_tfm);
2568         crypto_free_ahash(connection->peer_integrity_tfm);
2569         kfree(connection->int_dig_in);
2570         kfree(connection->int_dig_vv);
2571
2572         connection->csums_tfm = NULL;
2573         connection->verify_tfm = NULL;
2574         connection->cram_hmac_tfm = NULL;
2575         connection->integrity_tfm = NULL;
2576         connection->peer_integrity_tfm = NULL;
2577         connection->int_dig_in = NULL;
2578         connection->int_dig_vv = NULL;
2579 }
2580
2581 int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
2582 {
2583         struct drbd_connection *connection;
2584         cpumask_var_t new_cpu_mask;
2585         int err;
2586
2587         if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
2588                 return -ENOMEM;
2589
2590         /* silently ignore cpu mask on UP kernel */
2591         if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
2592                 err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
2593                                    cpumask_bits(new_cpu_mask), nr_cpu_ids);
2594                 if (err == -EOVERFLOW) {
2595                         /* So what. mask it out. */
2596                         cpumask_var_t tmp_cpu_mask;
2597                         if (zalloc_cpumask_var(&tmp_cpu_mask, GFP_KERNEL)) {
2598                                 cpumask_setall(tmp_cpu_mask);
2599                                 cpumask_and(new_cpu_mask, new_cpu_mask, tmp_cpu_mask);
2600                                 drbd_warn(resource, "Overflow in bitmap_parse(%.12s%s), truncating to %u bits\n",
2601                                         res_opts->cpu_mask,
2602                                         strlen(res_opts->cpu_mask) > 12 ? "..." : "",
2603                                         nr_cpu_ids);
2604                                 free_cpumask_var(tmp_cpu_mask);
2605                                 err = 0;
2606                         }
2607                 }
2608                 if (err) {
2609                         drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
2610                         /* retcode = ERR_CPU_MASK_PARSE; */
2611                         goto fail;
2612                 }
2613         }
2614         resource->res_opts = *res_opts;
2615         if (cpumask_empty(new_cpu_mask))
2616                 drbd_calc_cpu_mask(&new_cpu_mask);
2617         if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) {
2618                 cpumask_copy(resource->cpu_mask, new_cpu_mask);
2619                 for_each_connection_rcu(connection, resource) {
2620                         connection->receiver.reset_cpu_mask = 1;
2621                         connection->ack_receiver.reset_cpu_mask = 1;
2622                         connection->worker.reset_cpu_mask = 1;
2623                 }
2624         }
2625         err = 0;
2626
2627 fail:
2628         free_cpumask_var(new_cpu_mask);
2629         return err;
2630
2631 }
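
/*
 * Example (illustrative): a res_opts->cpu_mask of "3" (a hex bitmap,
 * i.e. CPUs 0 and 1) pins the receiver, worker and ack_receiver threads
 * of all connections of this resource to those CPUs; an empty mask lets
 * drbd_calc_cpu_mask() choose a CPU instead.
 */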
2632
2633 struct drbd_resource *drbd_create_resource(const char *name)
2634 {
2635         struct drbd_resource *resource;
2636
2637         resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
2638         if (!resource)
2639                 goto fail;
2640         resource->name = kstrdup(name, GFP_KERNEL);
2641         if (!resource->name)
2642                 goto fail_free_resource;
2643         if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
2644                 goto fail_free_name;
2645         kref_init(&resource->kref);
2646         idr_init(&resource->devices);
2647         INIT_LIST_HEAD(&resource->connections);
2648         resource->write_ordering = WO_BDEV_FLUSH;
2649         list_add_tail_rcu(&resource->resources, &drbd_resources);
2650         mutex_init(&resource->conf_update);
2651         mutex_init(&resource->adm_mutex);
2652         spin_lock_init(&resource->req_lock);
2653         drbd_debugfs_resource_add(resource);
2654         return resource;
2655
2656 fail_free_name:
2657         kfree(resource->name);
2658 fail_free_resource:
2659         kfree(resource);
2660 fail:
2661         return NULL;
2662 }
2663
2664 /* caller must be under adm_mutex */
2665 struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
2666 {
2667         struct drbd_resource *resource;
2668         struct drbd_connection *connection;
2669
2670         connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
2671         if (!connection)
2672                 return NULL;
2673
2674         if (drbd_alloc_socket(&connection->data))
2675                 goto fail;
2676         if (drbd_alloc_socket(&connection->meta))
2677                 goto fail;
2678
2679         connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
2680         if (!connection->current_epoch)
2681                 goto fail;
2682
2683         INIT_LIST_HEAD(&connection->transfer_log);
2684
2685         INIT_LIST_HEAD(&connection->current_epoch->list);
2686         connection->epochs = 1;
2687         spin_lock_init(&connection->epoch_lock);
2688
2689         connection->send.seen_any_write_yet = false;
2690         connection->send.current_epoch_nr = 0;
2691         connection->send.current_epoch_writes = 0;
2692
2693         resource = drbd_create_resource(name);
2694         if (!resource)
2695                 goto fail;
2696
2697         connection->cstate = C_STANDALONE;
2698         mutex_init(&connection->cstate_mutex);
2699         init_waitqueue_head(&connection->ping_wait);
2700         idr_init(&connection->peer_devices);
2701
2702         drbd_init_workqueue(&connection->sender_work);
2703         mutex_init(&connection->data.mutex);
2704         mutex_init(&connection->meta.mutex);
2705
2706         drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
2707         connection->receiver.connection = connection;
2708         drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
2709         connection->worker.connection = connection;
2710         drbd_thread_init(resource, &connection->ack_receiver, drbd_ack_receiver, "ack_recv");
2711         connection->ack_receiver.connection = connection;
2712
2713         kref_init(&connection->kref);
2714
2715         connection->resource = resource;
2716
2717         if (set_resource_options(resource, res_opts))
2718                 goto fail_resource;
2719
2720         kref_get(&resource->kref);
2721         list_add_tail_rcu(&connection->connections, &resource->connections);
2722         drbd_debugfs_connection_add(connection);
2723         return connection;
2724
2725 fail_resource:
2726         list_del(&resource->resources);
2727         drbd_free_resource(resource);
2728 fail:
2729         kfree(connection->current_epoch);
2730         drbd_free_socket(&connection->meta);
2731         drbd_free_socket(&connection->data);
2732         kfree(connection);
2733         return NULL;
2734 }
2735
2736 void drbd_destroy_connection(struct kref *kref)
2737 {
2738         struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
2739         struct drbd_resource *resource = connection->resource;
2740
2741         if (atomic_read(&connection->current_epoch->epoch_size) !=  0)
2742                 drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
2743         kfree(connection->current_epoch);
2744
2745         idr_destroy(&connection->peer_devices);
2746
2747         drbd_free_socket(&connection->meta);
2748         drbd_free_socket(&connection->data);
2749         kfree(connection->int_dig_in);
2750         kfree(connection->int_dig_vv);
2751         memset(connection, 0xfc, sizeof(*connection));
2752         kfree(connection);
2753         kref_put(&resource->kref, drbd_destroy_resource);
2754 }
2755
2756 static int init_submitter(struct drbd_device *device)
2757 {
2758         /* opencoded create_singlethread_workqueue(),
2759          * to be able to say "drbd%u_submit", ..., minor */
2760         device->submit.wq =
2761                 alloc_ordered_workqueue("drbd%u_submit", WQ_MEM_RECLAIM, device->minor);
2762         if (!device->submit.wq)
2763                 return -ENOMEM;
2764
2765         INIT_WORK(&device->submit.worker, do_submit);
2766         INIT_LIST_HEAD(&device->submit.writes);
2767         return 0;
2768 }
2769
2770 enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
2771 {
2772         struct drbd_resource *resource = adm_ctx->resource;
2773         struct drbd_connection *connection;
2774         struct drbd_device *device;
2775         struct drbd_peer_device *peer_device, *tmp_peer_device;
2776         struct gendisk *disk;
2777         struct request_queue *q;
2778         int id;
2779         int vnr = adm_ctx->volume;
2780         enum drbd_ret_code err = ERR_NOMEM;
2781
2782         device = minor_to_device(minor);
2783         if (device)
2784                 return ERR_MINOR_OR_VOLUME_EXISTS;
2785
2786         /* GFP_KERNEL, we are outside of all write-out paths */
2787         device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
2788         if (!device)
2789                 return ERR_NOMEM;
2790         kref_init(&device->kref);
2791
2792         kref_get(&resource->kref);
2793         device->resource = resource;
2794         device->minor = minor;
2795         device->vnr = vnr;
2796
2797         drbd_init_set_defaults(device);
2798
2799         q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, &resource->req_lock);
2800         if (!q)
2801                 goto out_no_q;
2802         device->rq_queue = q;
2803         q->queuedata   = device;
2804
2805         disk = alloc_disk(1);
2806         if (!disk)
2807                 goto out_no_disk;
2808         device->vdisk = disk;
2809
2810         set_disk_ro(disk, true);
2811
2812         disk->queue = q;
2813         disk->major = DRBD_MAJOR;
2814         disk->first_minor = minor;
2815         disk->fops = &drbd_ops;
2816         sprintf(disk->disk_name, "drbd%d", minor);
2817         disk->private_data = device;
2818
2819         device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
2820         /* we have no partitions. we contain only ourselves. */
2821         device->this_bdev->bd_contains = device->this_bdev;
2822
2823         q->backing_dev_info->congested_fn = drbd_congested;
2824         q->backing_dev_info->congested_data = device;
2825
2826         blk_queue_make_request(q, drbd_make_request);
2827         blk_queue_write_cache(q, true, true);
2828         /* Set max_hw_sectors to an odd value of 8 KiB here.
2829            This triggers a max_bio_size message upon first attach or connect. */
2830         blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
2831
2832         device->md_io.page = alloc_page(GFP_KERNEL);
2833         if (!device->md_io.page)
2834                 goto out_no_io_page;
2835
2836         if (drbd_bm_init(device))
2837                 goto out_no_bitmap;
2838         device->read_requests = RB_ROOT;
2839         device->write_requests = RB_ROOT;
2840
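        /* Every idr slot (and each peer_device created below) that can hand
         * out a pointer to this device pins it with its own kref; the
         * matching kref_put()s are in drbd_delete_device(). */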
2841         id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
2842         if (id < 0) {
2843                 if (id == -ENOSPC)
2844                         err = ERR_MINOR_OR_VOLUME_EXISTS;
2845                 goto out_no_minor_idr;
2846         }
2847         kref_get(&device->kref);
2848
2849         id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
2850         if (id < 0) {
2851                 if (id == -ENOSPC)
2852                         err = ERR_MINOR_OR_VOLUME_EXISTS;
2853                 goto out_idr_remove_minor;
2854         }
2855         kref_get(&device->kref);
2856
2857         INIT_LIST_HEAD(&device->peer_devices);
2858         INIT_LIST_HEAD(&device->pending_bitmap_io);
2859         for_each_connection(connection, resource) {
2860                 peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
2861                 if (!peer_device)
2862                         goto out_idr_remove_from_resource;
2863                 peer_device->connection = connection;
2864                 peer_device->device = device;
2865
2866                 list_add(&peer_device->peer_devices, &device->peer_devices);
2867                 kref_get(&device->kref);
2868
2869                 id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
2870                 if (id < 0) {
2871                         if (id == -ENOSPC)
2872                                 err = ERR_INVALID_REQUEST;
2873                         goto out_idr_remove_from_resource;
2874                 }
2875                 kref_get(&connection->kref);
2876                 INIT_WORK(&peer_device->send_acks_work, drbd_send_acks_wf);
2877         }
2878
2879         if (init_submitter(device)) {
2880                 err = ERR_NOMEM;
2881                 goto out_idr_remove_vol;
2882         }
2883
2884         add_disk(disk);
2885
2886         /* inherit the connection state */
2887         device->state.conn = first_connection(resource)->cstate;
2888         if (device->state.conn == C_WF_REPORT_PARAMS) {
2889                 for_each_peer_device(peer_device, device)
2890                         drbd_connected(peer_device);
2891         }
2892         /* move to create_peer_device() */
2893         for_each_peer_device(peer_device, device)
2894                 drbd_debugfs_peer_device_add(peer_device);
2895         drbd_debugfs_device_add(device);
2896         return NO_ERROR;
2897
2898 out_idr_remove_vol:
2899         idr_remove(&connection->peer_devices, vnr);
2900 out_idr_remove_from_resource:
2901         for_each_connection(connection, resource) {
2902                 peer_device = idr_remove(&connection->peer_devices, vnr);
2903                 if (peer_device)
2904                         kref_put(&connection->kref, drbd_destroy_connection);
2905         }
2906         for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2907                 list_del(&peer_device->peer_devices);
2908                 kfree(peer_device);
2909         }
2910         idr_remove(&resource->devices, vnr);
2911 out_idr_remove_minor:
2912         idr_remove(&drbd_devices, minor);
2913         synchronize_rcu();
2914 out_no_minor_idr:
2915         drbd_bm_cleanup(device);
2916 out_no_bitmap:
2917         __free_page(device->md_io.page);
2918 out_no_io_page:
2919         put_disk(disk);
2920 out_no_disk:
2921         blk_cleanup_queue(q);
2922 out_no_q:
2923         kref_put(&resource->kref, drbd_destroy_resource);
2924         kfree(device);
2925         return err;
2926 }
2927
2928 void drbd_delete_device(struct drbd_device *device)
2929 {
2930         struct drbd_resource *resource = device->resource;
2931         struct drbd_connection *connection;
2932         struct drbd_peer_device *peer_device;
2933
2934         /* move to free_peer_device() */
2935         for_each_peer_device(peer_device, device)
2936                 drbd_debugfs_peer_device_cleanup(peer_device);
2937         drbd_debugfs_device_cleanup(device);
2938         for_each_connection(connection, resource) {
2939                 idr_remove(&connection->peer_devices, device->vnr);
2940                 kref_put(&device->kref, drbd_destroy_device);
2941         }
2942         idr_remove(&resource->devices, device->vnr);
2943         kref_put(&device->kref, drbd_destroy_device);
2944         idr_remove(&drbd_devices, device_to_minor(device));
2945         kref_put(&device->kref, drbd_destroy_device);
2946         del_gendisk(device->vdisk);
2947         synchronize_rcu();
2948         kref_put(&device->kref, drbd_destroy_device);
2949 }
2950
2951 static int __init drbd_init(void)
2952 {
2953         int err;
2954
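        /* drbd_minor_count comes from the minor_count module parameter
         * (or the drbd.minor_count boot parameter when built in): refuse
         * out-of-range values as a module, fall back to the default when
         * built in. */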
2955         if (drbd_minor_count < DRBD_MINOR_COUNT_MIN || drbd_minor_count > DRBD_MINOR_COUNT_MAX) {
2956                 pr_err("invalid minor_count (%d)\n", drbd_minor_count);
2957 #ifdef MODULE
2958                 return -EINVAL;
2959 #else
2960                 drbd_minor_count = DRBD_MINOR_COUNT_DEF;
2961 #endif
2962         }
2963
2964         err = register_blkdev(DRBD_MAJOR, "drbd");
2965         if (err) {
2966                 pr_err("unable to register block device major %d\n",
2967                        DRBD_MAJOR);
2968                 return err;
2969         }
2970
2971         /*
2972          * allocate all necessary structs
2973          */
2974         init_waitqueue_head(&drbd_pp_wait);
2975
2976         drbd_proc = NULL; /* play safe for drbd_cleanup */
2977         idr_init(&drbd_devices);
2978
2979         mutex_init(&resources_mutex);
2980         INIT_LIST_HEAD(&drbd_resources);
2981
2982         err = drbd_genl_register();
2983         if (err) {
2984                 pr_err("unable to register generic netlink family\n");
2985                 goto fail;
2986         }
2987
2988         err = drbd_create_mempools();
2989         if (err)
2990                 goto fail;
2991
2992         err = -ENOMEM;
2993         drbd_proc = proc_create_single("drbd", S_IFREG | 0444, NULL, drbd_seq_show);
2994         if (!drbd_proc) {
2995                 pr_err("unable to register proc file\n");
2996                 goto fail;
2997         }
2998
2999         retry.wq = create_singlethread_workqueue("drbd-reissue");
3000         if (!retry.wq) {
3001                 pr_err("unable to create retry workqueue\n");
3002                 goto fail;
3003         }
3004         INIT_WORK(&retry.worker, do_retry);
3005         spin_lock_init(&retry.lock);
3006         INIT_LIST_HEAD(&retry.writes);
3007
3008         if (drbd_debugfs_init())
3009                 pr_notice("failed to initialize debugfs -- will not be available\n");
3010
3011         pr_info("initialized. "
3012                "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3013                API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3014         pr_info("%s\n", drbd_buildtag());
3015         pr_info("registered as block device major %d\n", DRBD_MAJOR);
3016         return 0; /* Success! */
3017
3018 fail:
3019         drbd_cleanup();
3020         if (err == -ENOMEM)
3021                 pr_err("ran out of memory\n");
3022         else
3023                 pr_err("initialization failure\n");
3024         return err;
3025 }
3026
3027 static void drbd_free_one_sock(struct drbd_socket *ds)
3028 {
3029         struct socket *s;
3030         mutex_lock(&ds->mutex);
3031         s = ds->socket;
3032         ds->socket = NULL;
3033         mutex_unlock(&ds->mutex);
3034         if (s) {
3035                 /* so debugfs does not need to mutex_lock() */
3036                 synchronize_rcu();
3037                 kernel_sock_shutdown(s, SHUT_RDWR);
3038                 sock_release(s);
3039         }
3040 }
3041
3042 void drbd_free_sock(struct drbd_connection *connection)
3043 {
3044         if (connection->data.socket)
3045                 drbd_free_one_sock(&connection->data);
3046         if (connection->meta.socket)
3047                 drbd_free_one_sock(&connection->meta);
3048 }
3049
3050 /* meta data management */
3051
3052 void conn_md_sync(struct drbd_connection *connection)
3053 {
3054         struct drbd_peer_device *peer_device;
3055         int vnr;
3056
3057         rcu_read_lock();
3058         idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
3059                 struct drbd_device *device = peer_device->device;
3060
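                /* We must not block inside an RCU read-side section, so pin
                 * the device with a kref, drop the RCU lock around the
                 * (blocking) meta data write, then re-take it to continue
                 * the idr walk. */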
3061                 kref_get(&device->kref);
3062                 rcu_read_unlock();
3063                 drbd_md_sync(device);
3064                 kref_put(&device->kref, drbd_destroy_device);
3065                 rcu_read_lock();
3066         }
3067         rcu_read_unlock();
3068 }
3069
3070 /* aligned 4kByte */
3071 struct meta_data_on_disk {
3072         u64 la_size_sect;      /* last agreed size. */
3073         u64 uuid[UI_SIZE];   /* UUIDs. */
3074         u64 device_uuid;
3075         u64 reserved_u64_1;
3076         u32 flags;             /* MDF */
3077         u32 magic;
3078         u32 md_size_sect;
3079         u32 al_offset;         /* offset to this block */
3080         u32 al_nr_extents;     /* important for restoring the AL (userspace) */
3081               /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
3082         u32 bm_offset;         /* offset to the bitmap, from here */
3083         u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
3084         u32 la_peer_max_bio_size;   /* last peer max_bio_size */
3085
3086         /* see al_tr_number_to_on_disk_sector() */
3087         u32 al_stripes;
3088         u32 al_stripe_size_4k;
3089
3090         u8 reserved_u8[4096 - (7*8 + 10*4)];
3091 } __packed;
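/* Size check: the fields above occupy 7*8 + 10*4 = 96 bytes, so the
 * reserved_u8[] padding is 4096 - 96 = 4000 bytes, keeping the on-disk
 * superblock at exactly 4 KiB (see the BUILD_BUG_ON in drbd_md_sync()). */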
3092
3093
3094
3095 void drbd_md_write(struct drbd_device *device, void *b)
3096 {
3097         struct meta_data_on_disk *buffer = b;
3098         sector_t sector;
3099         int i;
3100
3101         memset(buffer, 0, sizeof(*buffer));
3102
3103         buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
3104         for (i = UI_CURRENT; i < UI_SIZE; i++)
3105                 buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
3106         buffer->flags = cpu_to_be32(device->ldev->md.flags);
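        /* The in-kernel writer always stamps the "unclean" magic; see the
         * matching check in drbd_md_read(), which expects userspace
         * ("drbdadm apply-al") to mark the meta data clean again. */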
3107         buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
3108
3109         buffer->md_size_sect  = cpu_to_be32(device->ldev->md.md_size_sect);
3110         buffer->al_offset     = cpu_to_be32(device->ldev->md.al_offset);
3111         buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
3112         buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3113         buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);
3114
3115         buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
3116         buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);
3117
3118         buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
3119         buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);
3120
3121         D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
3122         sector = device->ldev->md.md_offset;
3123
3124         if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
3125                 /* this was worth a try anyway ... */
3126                 drbd_err(device, "meta data update failed!\n");
3127                 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
3128         }
3129 }
3130
3131 /**
3132  * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3133  * @device:     DRBD device.
3134  */
3135 void drbd_md_sync(struct drbd_device *device)
3136 {
3137         struct meta_data_on_disk *buffer;
3138
3139         /* Don't accidentally change the DRBD meta data layout. */
3140         BUILD_BUG_ON(UI_SIZE != 4);
3141         BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
3142
3143         del_timer(&device->md_sync_timer);
3144         /* timer may be rearmed by drbd_md_mark_dirty() now. */
3145         if (!test_and_clear_bit(MD_DIRTY, &device->flags))
3146                 return;
3147
3148         /* We use D_FAILED here, not D_ATTACHING, because we try to write
3149          * metadata even if we detach due to a disk failure! */
3150         if (!get_ldev_if_state(device, D_FAILED))
3151                 return;
3152
3153         buffer = drbd_md_get_buffer(device, __func__);
3154         if (!buffer)
3155                 goto out;
3156
3157         drbd_md_write(device, buffer);
3158
3159         /* Update device->ldev->md.la_size_sect,
3160          * since we just wrote it to the metadata. */
3161         device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);
3162
3163         drbd_md_put_buffer(device);
3164 out:
3165         put_ldev(device);
3166 }
3167
3168 static int check_activity_log_stripe_size(struct drbd_device *device,
3169                 struct meta_data_on_disk *on_disk,
3170                 struct drbd_md *in_core)
3171 {
3172         u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
3173         u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
3174         u64 al_size_4k;
3175
3176         /* both not set: default to old fixed size activity log */
3177         if (al_stripes == 0 && al_stripe_size_4k == 0) {
3178                 al_stripes = 1;
3179                 al_stripe_size_4k = MD_32kB_SECT/8;
3180         }
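        /* Worked example: the defaults above give al_stripes = 1 and
         * al_stripe_size_4k = MD_32kB_SECT/8 = 64/8 = 8 (MD_32kB_SECT being
         * 512-byte sectors), i.e. an activity log of 8 * 4kB = 32kB --
         * exactly the lower limit checked below. */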
3181
3182         /* some paranoia plausibility checks */
3183
3184         /* we need both values to be set */
3185         if (al_stripes == 0 || al_stripe_size_4k == 0)
3186                 goto err;
3187
3188         al_size_4k = (u64)al_stripes * al_stripe_size_4k;
3189
3190         /* Upper limit of activity log area, to avoid potential overflow
3191          * problems in al_tr_number_to_on_disk_sector(). Since right now more
3192          * than 72 * 4k blocks total only increases the amount of history,
3193          * limiting this arbitrarily to 16 GB is not a real limitation ;-)  */
3194         if (al_size_4k > (16 * 1024 * 1024/4))
3195                 goto err;
3196
3197         /* Lower limit: we need at least 8 transaction slots (32kB)
3198          * to not break existing setups */
3199         if (al_size_4k < MD_32kB_SECT/8)
3200                 goto err;
3201
3202         in_core->al_stripe_size_4k = al_stripe_size_4k;
3203         in_core->al_stripes = al_stripes;
3204         in_core->al_size_4k = al_size_4k;
3205
3206         return 0;
3207 err:
3208         drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
3209                         al_stripes, al_stripe_size_4k);
3210         return -EINVAL;
3211 }
3212
3213 static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
3214 {
3215         sector_t capacity = drbd_get_capacity(bdev->md_bdev);
3216         struct drbd_md *in_core = &bdev->md;
3217         s32 on_disk_al_sect;
3218         s32 on_disk_bm_sect;
3219
3220         /* The on-disk size of the activity log, calculated from offsets, and
3221          * the size of the activity log calculated from the stripe settings,
3222          * should match.
3223          * Though we could relax this a bit: it is OK if the striped activity log
3224          * fits in the available on-disk activity log size.
3225          * Right now, that would break how resize is implemented.
3226          * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
3227          * of possible unused padding space in the on disk layout. */
3228         if (in_core->al_offset < 0) {
3229                 if (in_core->bm_offset > in_core->al_offset)
3230                         goto err;
3231                 on_disk_al_sect = -in_core->al_offset;
3232                 on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
3233         } else {
3234                 if (in_core->al_offset != MD_4kB_SECT)
3235                         goto err;
3236                 if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
3237                         goto err;
3238
3239                 on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
3240                 on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
3241         }
3242
3243         /* old fixed size meta data is exactly that: fixed. */
3244         if (in_core->meta_dev_idx >= 0) {
3245                 if (in_core->md_size_sect != MD_128MB_SECT
3246                 ||  in_core->al_offset != MD_4kB_SECT
3247                 ||  in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
3248                 ||  in_core->al_stripes != 1
3249                 ||  in_core->al_stripe_size_4k != MD_32kB_SECT/8)
3250                         goto err;
3251         }
3252
3253         if (capacity < in_core->md_size_sect)
3254                 goto err;
3255         if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
3256                 goto err;
3257
3258         /* should be aligned, and at least 32k */
3259         if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
3260                 goto err;
3261
3262         /* should fit (for now: exactly) into the available on-disk space;
3263          * overflow prevention is in check_activity_log_stripe_size() above. */
3264         if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
3265                 goto err;
3266
3267         /* again, should be aligned */
3268         if (in_core->bm_offset & 7)
3269                 goto err;
3270
3271         /* FIXME check for device grow with flex external meta data? */
3272
3273         /* can the available bitmap space cover the last agreed device size? */
3274         if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
3275                 goto err;
3276
3277         return 0;
3278
3279 err:
3280         drbd_err(device, "meta data offsets don't make sense: idx=%d "
3281                         "al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
3282                         "md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
3283                         in_core->meta_dev_idx,
3284                         in_core->al_stripes, in_core->al_stripe_size_4k,
3285                         in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
3286                         (unsigned long long)in_core->la_size_sect,
3287                         (unsigned long long)capacity);
3288
3289         return -EINVAL;
3290 }
3291
3292
3293 /**
3294  * drbd_md_read() - Reads in the meta data super block
3295  * @device:     DRBD device.
3296  * @bdev:       Device from which the meta data should be read in.
3297  *
3298  * Return: NO_ERROR on success, or an enum drbd_ret_code error in case
3299  * something goes wrong.
3300  *
3301  * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
3302  * even before @bdev is assigned to @device->ldev.
3303  */
3304 int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
3305 {
3306         struct meta_data_on_disk *buffer;
3307         u32 magic, flags;
3308         int i, rv = NO_ERROR;
3309
3310         if (device->state.disk != D_DISKLESS)
3311                 return ERR_DISK_CONFIGURED;
3312
3313         buffer = drbd_md_get_buffer(device, __func__);
3314         if (!buffer)
3315                 return ERR_NOMEM;
3316
3317         /* First, figure out where our meta data superblock is located,
3318          * and read it. */
3319         bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
3320         bdev->md.md_offset = drbd_md_ss(bdev);
3321         /* Even for (flexible or indexed) external meta data,
3322          * restrict ourselves to the 4k superblock for now.
3323          * Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */
3324         bdev->md.md_size_sect = 8;
3325
3326         if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
3327                                  REQ_OP_READ)) {
3328                 /* NOTE: can't do normal error processing here as this is
3329                    called BEFORE disk is attached */
3330                 drbd_err(device, "Error while reading metadata.\n");
3331                 rv = ERR_IO_MD_DISK;
3332                 goto err;
3333         }
3334
3335         magic = be32_to_cpu(buffer->magic);
3336         flags = be32_to_cpu(buffer->flags);
3337         if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
3338             (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
3339                 /* btw: that's Activity Log clean, not "all" clean. */
3340                 drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
3341                 rv = ERR_MD_UNCLEAN;
3342                 goto err;
3343         }
3344
3345         rv = ERR_MD_INVALID;
3346         if (magic != DRBD_MD_MAGIC_08) {
3347                 if (magic == DRBD_MD_MAGIC_07)
3348                         drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
3349                 else
3350                         drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
3351                 goto err;
3352         }
3353
3354         if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3355                 drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3356                     be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3357                 goto err;
3358         }
3359
3360
3361         /* convert from on-disk big endian to in-core (CPU) byte order */
3362         bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
3363         for (i = UI_CURRENT; i < UI_SIZE; i++)
3364                 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3365         bdev->md.flags = be32_to_cpu(buffer->flags);
3366         bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3367
3368         bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
3369         bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
3370         bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
3371
3372         if (check_activity_log_stripe_size(device, buffer, &bdev->md))
3373                 goto err;
3374         if (check_offsets_and_sizes(device, bdev))
3375                 goto err;
3376
3377         if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3378                 drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
3379                     be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3380                 goto err;
3381         }
3382         if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3383                 drbd_err(device, "unexpected md_size: %u (expected %u)\n",
3384                     be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3385                 goto err;
3386         }
3387
3388         rv = NO_ERROR;
3389
3390         spin_lock_irq(&device->resource->req_lock);
3391         if (device->state.conn < C_CONNECTED) {
3392                 unsigned int peer;
3393                 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3394                 peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
3395                 device->peer_max_bio_size = peer;
3396         }
3397         spin_unlock_irq(&device->resource->req_lock);
3398
3399  err:
3400         drbd_md_put_buffer(device);
3401
3402         return rv;
3403 }
3404
3405 /**
3406  * drbd_md_mark_dirty() - Mark meta data super block as dirty
3407  * @device:     DRBD device.
3408  *
3409  * Call this function if you change anything that should be written to
3410  * the meta-data super block. This function sets MD_DIRTY and arms a
3411  * timer that ensures drbd_md_sync() gets called within five seconds.
3412  */
3413 #ifdef DEBUG
3414 void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func)
3415 {
3416         if (!test_and_set_bit(MD_DIRTY, &device->flags)) {
3417                 mod_timer(&device->md_sync_timer, jiffies + HZ);
3418                 device->last_md_mark_dirty.line = line;
3419                 device->last_md_mark_dirty.func = func;
3420         }
3421 }
3422 #else
3423 void drbd_md_mark_dirty(struct drbd_device *device)
3424 {
3425         if (!test_and_set_bit(MD_DIRTY, &device->flags))
3426                 mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
3427 }
3428 #endif
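/*
 * Typical caller pattern (an illustrative sketch; compare __drbd_uuid_set()
 * below): first change the in-core meta data, then mark it dirty, so that
 * the md_sync_timer guarantees an eventual drbd_md_sync():
 *
 *      device->ldev->md.flags |= MDF_FULL_SYNC;
 *      drbd_md_mark_dirty(device);
 */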
3429
3430 void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
3431 {
3432         int i;
3433
3434         for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
3435                 device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
3436 }
3437
3438 void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3439 {
3440         if (idx == UI_CURRENT) {
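                /* the lowest bit of the current UUID encodes the role it is
                 * set under: 1 while Primary, 0 while Secondary */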
3441                 if (device->state.role == R_PRIMARY)
3442                         val |= 1;
3443                 else
3444                         val &= ~((u64)1);
3445
3446                 drbd_set_ed_uuid(device, val);
3447         }
3448
3449         device->ldev->md.uuid[idx] = val;
3450         drbd_md_mark_dirty(device);
3451 }
3452
3453 void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3454 {
3455         unsigned long flags;
3456         spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3457         __drbd_uuid_set(device, idx, val);
3458         spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3459 }
3460
3461 void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3462 {
3463         unsigned long flags;
3464         spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3465         if (device->ldev->md.uuid[idx]) {
3466                 drbd_uuid_move_history(device);
3467                 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
3468         }
3469         __drbd_uuid_set(device, idx, val);
3470         spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3471 }
3472
3473 /**
3474  * drbd_uuid_new_current() - Creates a new current UUID
3475  * @device:     DRBD device.
3476  *
3477  * Creates a new current UUID, and rotates the old current UUID into
3478  * the bitmap slot. Causes an incremental resync upon next connect.
3479  */
3480 void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
3481 {
3482         u64 val;
3483         unsigned long long bm_uuid;
3484
3485         get_random_bytes(&val, sizeof(u64));
3486
3487         spin_lock_irq(&device->ldev->md.uuid_lock);
3488         bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3489
3490         if (bm_uuid)
3491                 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3492
3493         device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
3494         __drbd_uuid_set(device, UI_CURRENT, val);
3495         spin_unlock_irq(&device->ldev->md.uuid_lock);
3496
3497         drbd_print_uuids(device, "new current UUID");
3498         /* get it to stable storage _now_ */
3499         drbd_md_sync(device);
3500 }
3501
3502 void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
3503 {
3504         unsigned long flags;
3505         if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3506                 return;
3507
3508         spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3509         if (val == 0) {
3510                 drbd_uuid_move_history(device);
3511                 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3512                 device->ldev->md.uuid[UI_BITMAP] = 0;
3513         } else {
3514                 unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3515                 if (bm_uuid)
3516                         drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3517
3518                 device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
3519         }
3520         spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3521
3522         drbd_md_mark_dirty(device);
3523 }
3524
3525 /**
3526  * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3527  * @device:     DRBD device.
3528  *
3529  * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3530  */
3531 int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local)
3532 {
3533         int rv = -EIO;
3534
3535         drbd_md_set_flag(device, MDF_FULL_SYNC);
3536         drbd_md_sync(device);
3537         drbd_bm_set_all(device);
3538
3539         rv = drbd_bm_write(device);
3540
3541         if (!rv) {
3542                 drbd_md_clear_flag(device, MDF_FULL_SYNC);
3543                 drbd_md_sync(device);
3544         }
3545
3546         return rv;
3547 }
3548
3549 /**
3550  * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3551  * @device:     DRBD device.
3552  *
3553  * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3554  */
3555 int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local)
3556 {
3557         drbd_resume_al(device);
3558         drbd_bm_clear_all(device);
3559         return drbd_bm_write(device);
3560 }
3561
3562 static int w_bitmap_io(struct drbd_work *w, int unused)
3563 {
3564         struct drbd_device *device =
3565                 container_of(w, struct drbd_device, bm_io_work.w);
3566         struct bm_io_work *work = &device->bm_io_work;
3567         int rv = -EIO;
3568
3569         if (work->flags != BM_LOCKED_CHANGE_ALLOWED) {
3570                 int cnt = atomic_read(&device->ap_bio_cnt);
3571                 if (cnt)
3572                         drbd_err(device, "FIXME: ap_bio_cnt %d, expected 0; queued for '%s'\n",
3573                                         cnt, work->why);
3574         }
3575
3576         if (get_ldev(device)) {
3577                 drbd_bm_lock(device, work->why, work->flags);
3578                 rv = work->io_fn(device);
3579                 drbd_bm_unlock(device);
3580                 put_ldev(device);
3581         }
3582
3583         clear_bit_unlock(BITMAP_IO, &device->flags);
3584         wake_up(&device->misc_wait);
3585
3586         if (work->done)
3587                 work->done(device, rv);
3588
3589         clear_bit(BITMAP_IO_QUEUED, &device->flags);
3590         work->why = NULL;
3591         work->flags = 0;
3592
3593         return 0;
3594 }
3595
3596 /**
3597  * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3598  * @device:     DRBD device.
3599  * @io_fn:      IO callback to be called when bitmap IO is possible
3600  * @done:       callback to be called after the bitmap IO was performed
3601  * @why:        Descriptive text of the reason for doing the IO
 * @flags:      bitmap locking flags (enum bm_flag)
3602  *
3603  * While IO on the bitmap happens we freeze application IO, thus ensuring
3604  * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
3605  * called from worker context. It MUST NOT be used while a previous such
3606  * work is still pending!
3607  *
3608  * Its worker function encloses the call of io_fn() by get_ldev() and
3609  * put_ldev().
3610  */
3611 void drbd_queue_bitmap_io(struct drbd_device *device,
3612                           int (*io_fn)(struct drbd_device *),
3613                           void (*done)(struct drbd_device *, int),
3614                           char *why, enum bm_flag flags)
3615 {
3616         D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
3617
3618         D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
3619         D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
3620         D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
3621         if (device->bm_io_work.why)
3622                 drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
3623                         why, device->bm_io_work.why);
3624
3625         device->bm_io_work.io_fn = io_fn;
3626         device->bm_io_work.done = done;
3627         device->bm_io_work.why = why;
3628         device->bm_io_work.flags = flags;
3629
3630         spin_lock_irq(&device->resource->req_lock);
3631         set_bit(BITMAP_IO, &device->flags);
3632         /* don't wait for pending application IO if the caller indicates that
3633          * application IO does not conflict anyways. */
3634         if (flags == BM_LOCKED_CHANGE_ALLOWED || atomic_read(&device->ap_bio_cnt) == 0) {
3635                 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
3636                         drbd_queue_work(&first_peer_device(device)->connection->sender_work,
3637                                         &device->bm_io_work.w);
3638         }
3639         spin_unlock_irq(&device->resource->req_lock);
3640 }
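/*
 * Illustrative use, modeled on callers elsewhere in drbd (a sketch, not
 * verbatim from this file): queue a "set all bits and write out" bitmap
 * IO from worker context, with a caller-supplied completion callback:
 *
 *      drbd_queue_bitmap_io(device, &drbd_bmio_set_n_write,
 *                           &abw_start_sync, "set_n_write from StartSync",
 *                           BM_LOCKED_MASK);
 */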
3641
3642 /**
3643  * drbd_bitmap_io() -  Does an IO operation on the whole bitmap
3644  * @device:     DRBD device.
3645  * @io_fn:      IO callback to be called when bitmap IO is possible
3646  * @why:        Descriptive text of the reason for doing the IO
 * @flags:      bitmap locking flags (enum bm_flag)
3647  *
3648  * Freezes application IO while the actual IO operation runs. This
3649  * function MAY NOT be called from worker context.
3650  */
3651 int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
3652                 char *why, enum bm_flag flags)
3653 {
3654         /* Only suspend io, if some operation is supposed to be locked out */
3655         const bool do_suspend_io = flags & (BM_DONT_CLEAR|BM_DONT_SET|BM_DONT_TEST);
3656         int rv;
3657
3658         D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
3659
3660         if (do_suspend_io)
3661                 drbd_suspend_io(device);
3662
3663         drbd_bm_lock(device, why, flags);
3664         rv = io_fn(device);
3665         drbd_bm_unlock(device);
3666
3667         if (do_suspend_io)
3668                 drbd_resume_io(device);
3669
3670         return rv;
3671 }
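/*
 * Illustrative synchronous counterpart (again a sketch modeled on callers
 * elsewhere in drbd): from non-worker context, e.g. while handling an
 * "invalidate" request, write out a fully set bitmap with
 *
 *      rv = drbd_bitmap_io(device, &drbd_bmio_set_n_write,
 *                          "set_n_write from invalidate", BM_LOCKED_MASK);
 */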
3672
3673 void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
3674 {
3675         if ((device->ldev->md.flags & flag) != flag) {
3676                 drbd_md_mark_dirty(device);
3677                 device->ldev->md.flags |= flag;
3678         }
3679 }
3680
3681 void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
3682 {
3683         if ((device->ldev->md.flags & flag) != 0) {
3684                 drbd_md_mark_dirty(device);
3685                 device->ldev->md.flags &= ~flag;
3686         }
3687 }
3688 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
3689 {
3690         return (bdev->md.flags & flag) != 0;
3691 }
3692
3693 static void md_sync_timer_fn(struct timer_list *t)
3694 {
3695         struct drbd_device *device = from_timer(device, t, md_sync_timer);
3696         drbd_device_post_work(device, MD_SYNC);
3697 }
3698
3699 const char *cmdname(enum drbd_packet cmd)
3700 {
3701         /* THINK may need to become several global tables
3702          * when we want to support more than
3703          * one PRO_VERSION */
3704         static const char *cmdnames[] = {
3705                 [P_DATA]                = "Data",
3706                 [P_WSAME]               = "WriteSame",
3707                 [P_TRIM]                = "Trim",
3708                 [P_DATA_REPLY]          = "DataReply",
3709                 [P_RS_DATA_REPLY]       = "RSDataReply",
3710                 [P_BARRIER]             = "Barrier",
3711                 [P_BITMAP]              = "ReportBitMap",
3712                 [P_BECOME_SYNC_TARGET]  = "BecomeSyncTarget",
3713                 [P_BECOME_SYNC_SOURCE]  = "BecomeSyncSource",
3714                 [P_UNPLUG_REMOTE]       = "UnplugRemote",
3715                 [P_DATA_REQUEST]        = "DataRequest",
3716                 [P_RS_DATA_REQUEST]     = "RSDataRequest",
3717                 [P_SYNC_PARAM]          = "SyncParam",
3718                 [P_SYNC_PARAM89]        = "SyncParam89",
3719                 [P_PROTOCOL]            = "ReportProtocol",
3720                 [P_UUIDS]               = "ReportUUIDs",
3721                 [P_SIZES]               = "ReportSizes",
3722                 [P_STATE]               = "ReportState",
3723                 [P_SYNC_UUID]           = "ReportSyncUUID",
3724                 [P_AUTH_CHALLENGE]      = "AuthChallenge",
3725                 [P_AUTH_RESPONSE]       = "AuthResponse",
3726                 [P_PING]                = "Ping",
3727                 [P_PING_ACK]            = "PingAck",
3728                 [P_RECV_ACK]            = "RecvAck",
3729                 [P_WRITE_ACK]           = "WriteAck",
3730                 [P_RS_WRITE_ACK]        = "RSWriteAck",
3731                 [P_SUPERSEDED]          = "Superseded",
3732                 [P_NEG_ACK]             = "NegAck",
3733                 [P_NEG_DREPLY]          = "NegDReply",
3734                 [P_NEG_RS_DREPLY]       = "NegRSDReply",
3735                 [P_BARRIER_ACK]         = "BarrierAck",
3736                 [P_STATE_CHG_REQ]       = "StateChgRequest",
3737                 [P_STATE_CHG_REPLY]     = "StateChgReply",
3738                 [P_OV_REQUEST]          = "OVRequest",
3739                 [P_OV_REPLY]            = "OVReply",
3740                 [P_OV_RESULT]           = "OVResult",
3741                 [P_CSUM_RS_REQUEST]     = "CsumRSRequest",
3742                 [P_RS_IS_IN_SYNC]       = "CsumRSIsInSync",
3743                 [P_COMPRESSED_BITMAP]   = "CBitmap",
3744                 [P_DELAY_PROBE]         = "DelayProbe",
3745                 [P_OUT_OF_SYNC]         = "OutOfSync",
3746                 [P_RETRY_WRITE]         = "RetryWrite",
3747                 [P_RS_CANCEL]           = "RSCancel",
3748                 [P_CONN_ST_CHG_REQ]     = "conn_st_chg_req",
3749                 [P_CONN_ST_CHG_REPLY]   = "conn_st_chg_reply",
3751                 [P_PROTOCOL_UPDATE]     = "protocol_update",
3752                 [P_RS_THIN_REQ]         = "rs_thin_req",
3753                 [P_RS_DEALLOCATED]      = "rs_deallocated",
3754
3755                 /* enum drbd_packet, but not commands - obsoleted flags:
3756                  *      P_MAY_IGNORE
3757                  *      P_MAX_OPT_CMD
3758                  */
3759         };
3760
3761         /* too big for the array: 0xfffX */
3762         if (cmd == P_INITIAL_META)
3763                 return "InitialMeta";
3764         if (cmd == P_INITIAL_DATA)
3765                 return "InitialData";
3766         if (cmd == P_CONNECTION_FEATURES)
3767                 return "ConnectionFeatures";
3768         if (cmd >= ARRAY_SIZE(cmdnames) || !cmdnames[cmd]) /* gaps are NULL */
3769                 return "Unknown";
3770         return cmdnames[cmd];
3771 }
3772
3773 /**
3774  * drbd_wait_misc  -  wait for a request to make progress
3775  * @device:     device associated with the request
3776  * @i:          the struct drbd_interval embedded in struct drbd_request or
3777  *              struct drbd_peer_request
3778  */
3779 int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
3780 {
3781         struct net_conf *nc;
3782         DEFINE_WAIT(wait);
3783         long timeout;
3784
3785         rcu_read_lock();
3786         nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
3787         if (!nc) {
3788                 rcu_read_unlock();
3789                 return -ETIMEDOUT;
3790         }
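        /* nc->timeout is configured in units of 0.1 seconds, hence the
         * "* HZ / 10" conversion to jiffies; ko_count allows the peer to
         * miss that many timeout intervals before we give up. */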
3791         timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
3792         rcu_read_unlock();
3793
3794         /* Indicate to wake up device->misc_wait on progress.  */
3795         i->waiting = true;
3796         prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
3797         spin_unlock_irq(&device->resource->req_lock);
3798         timeout = schedule_timeout(timeout);
3799         finish_wait(&device->misc_wait, &wait);
3800         spin_lock_irq(&device->resource->req_lock);
3801         if (!timeout || device->state.conn < C_CONNECTED)
3802                 return -ETIMEDOUT;
3803         if (signal_pending(current))
3804                 return -ERESTARTSYS;
3805         return 0;
3806 }
3807
3808 void lock_all_resources(void)
3809 {
3810         struct drbd_resource *resource;
3811         int __maybe_unused i = 0;
3812
3813         mutex_lock(&resources_mutex);
3814         local_irq_disable();
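        /* give each resource's req_lock its own lockdep subclass, so that
         * taking several locks of the same class in a row does not trigger
         * a false positive deadlock report */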
3815         for_each_resource(resource, &drbd_resources)
3816                 spin_lock_nested(&resource->req_lock, i++);
3817 }
3818
3819 void unlock_all_resources(void)
3820 {
3821         struct drbd_resource *resource;
3822
3823         for_each_resource(resource, &drbd_resources)
3824                 spin_unlock(&resource->req_lock);
3825         local_irq_enable();
3826         mutex_unlock(&resources_mutex);
3827 }
3828
3829 #ifdef CONFIG_DRBD_FAULT_INJECTION
3830 /* Fault insertion support including random number generator shamelessly
3831  * stolen from kernel/rcutorture.c */
3832 struct fault_random_state {
3833         unsigned long state;
3834         unsigned long count;
3835 };
3836
3837 #define FAULT_RANDOM_MULT 39916801  /* prime */
3838 #define FAULT_RANDOM_ADD        479001701 /* prime */
3839 #define FAULT_RANDOM_REFRESH 10000
3840
3841 /*
3842  * Crude but fast random-number generator.  Uses a linear congruential
3843  * generator, with occasional help from get_random_bytes().
3844  */
3845 static unsigned long
3846 _drbd_fault_random(struct fault_random_state *rsp)
3847 {
3848         long refresh;
3849
3850         if (!rsp->count--) {
3851                 get_random_bytes(&refresh, sizeof(refresh));
3852                 rsp->state += refresh;
3853                 rsp->count = FAULT_RANDOM_REFRESH;
3854         }
3855         rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
3856         return swahw32(rsp->state);
3857 }
3858
3859 static char *_drbd_fault_str(unsigned int type)
3860 {
3861         static char *_faults[] = {
3862                 [DRBD_FAULT_MD_WR] = "Meta-data write",
3863                 [DRBD_FAULT_MD_RD] = "Meta-data read",
3864                 [DRBD_FAULT_RS_WR] = "Resync write",
3865                 [DRBD_FAULT_RS_RD] = "Resync read",
3866                 [DRBD_FAULT_DT_WR] = "Data write",
3867                 [DRBD_FAULT_DT_RD] = "Data read",
3868                 [DRBD_FAULT_DT_RA] = "Data read ahead",
3869                 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
3870                 [DRBD_FAULT_AL_EE] = "EE allocation",
3871                 [DRBD_FAULT_RECEIVE] = "receive data corruption",
3872         };
3873
3874         return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
3875 }
3876
3877 unsigned int
3878 _drbd_insert_fault(struct drbd_device *device, unsigned int type)
3879 {
3880         static struct fault_random_state rrs = {0, 0};
3881
3882         unsigned int ret = (
3883                 (drbd_fault_devs == 0 ||
3884                         ((1 << device_to_minor(device)) & drbd_fault_devs) != 0) &&
3885                 (((_drbd_fault_random(&rrs) % 100) + 1) <= drbd_fault_rate));
3886
3887         if (ret) {
3888                 drbd_fault_count++;
3889
3890                 if (__ratelimit(&drbd_ratelimit_state))
3891                         drbd_warn(device, "***Simulating %s failure\n",
3892                                 _drbd_fault_str(type));
3893         }
3894
3895         return ret;
3896 }
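/*
 * Illustrative configuration (a sketch): with the module parameters
 * declared at the top of this file, e.g.
 *
 *      modprobe drbd fault_rate=10 enable_faults=0x1 fault_devs=0x1
 *
 * roughly 10% of meta data writes (DRBD_FAULT_MD_WR is type 0) on minor 0
 * would fail artificially; enable_faults is checked by the
 * drbd_insert_fault() wrapper in drbd_int.h before this function runs.
 */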
3897 #endif
3898
3899 const char *drbd_buildtag(void)
3900 {
3901         /* DRBD built from external sources has here a reference to the
3902            git hash of the source code. */
3903
3904         static char buildtag[38] = "\0uilt-in";
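        /* buildtag[0] == '\0' doubles as the "not yet initialized" marker:
         * built-in kernels merely flip it to 'b' below, turning the buffer
         * into "built-in"; modules overwrite it with their srcversion. */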
3905
3906         if (buildtag[0] == 0) {
3907 #ifdef MODULE
3908                 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
3909 #else
3910                 buildtag[0] = 'b';
3911 #endif
3912         }
3913
3914         return buildtag;
3915 }
3916
3917 module_init(drbd_init)
3918 module_exit(drbd_cleanup)
3919
3920 EXPORT_SYMBOL(drbd_conn_str);
3921 EXPORT_SYMBOL(drbd_role_str);
3922 EXPORT_SYMBOL(drbd_disk_str);
3923 EXPORT_SYMBOL(drbd_set_st_err_str);