
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OPREPLY_FRONT_LEN   512

static struct kmem_cache        *ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

/*
 * Implement client access to the distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * changes, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
                        struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
                          struct ceph_osd_linger_request *lreq);

#if 1
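/*
 * There is no rwsem primitive that directly reports "held for write",
 * so rwsem_is_wrlocked() infers it: if down_read_trylock() succeeds,
 * the semaphore cannot currently be write-locked, so the read lock is
 * dropped again and false is returned.  The check can race with a
 * concurrent writer, which is fine for WARN_ON-style assertions.
 */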
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
        bool wrlocked = true;

        if (unlikely(down_read_trylock(sem))) {
                wrlocked = false;
                up_read(sem);
        }

        return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        WARN_ON(!(mutex_is_locked(&osd->lock) &&
                  rwsem_is_locked(&osdc->lock)) &&
                !rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
        WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
                        u64 *objnum, u64 *objoff, u64 *objlen)
{
        u64 orig_len = *plen;
        int r;

        /* object extent? */
        r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
                                          objoff, objlen);
        if (r < 0)
                return r;
        if (*objlen < orig_len) {
                *plen = *objlen;
                dout(" skipping last %llu, final file extent %llu~%llu\n",
                     orig_len - *plen, off, *plen);
        }

        dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

        return 0;
}

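/*
 * A worked example of the mapping above, assuming a simple layout with
 * 4 MB objects and no striping (stripe_unit == object_size == 4 MB,
 * stripe_count == 1): a file extent starting at off = 6 MB with
 * *plen = 4 MB falls into object 1 at offset 2 MB, and only 2 MB fit
 * before the object boundary, so *plen is shortened to 2 MB.  The
 * hypothetical sketch below (not compiled) illustrates the calling
 * convention; error handling is elided.
 */
#if 0 /* usage sketch, not part of the driver */
static void calc_layout_example(struct ceph_file_layout *layout)
{
        u64 off = 6 * 1024 * 1024;      /* file offset: 6 MB */
        u64 len = 4 * 1024 * 1024;      /* desired length: 4 MB */
        u64 objnum, objoff, objlen;

        /* with 4 MB objects: objnum = 1, objoff = 2 MB, objlen = 2 MB */
        if (!calc_layout(layout, off, &len, &objnum, &objoff, &objlen))
                pr_info("object %llu, %llu~%llu (len now %llu)\n",
                        objnum, objoff, objlen, len);
}
#endif
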
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
        memset(osd_data, 0, sizeof (*osd_data));
        osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
                        struct page **pages, u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
        osd_data->pages = pages;
        osd_data->length = length;
        osd_data->alignment = alignment;
        osd_data->pages_from_pool = pages_from_pool;
        osd_data->own_pages = own_pages;
}

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
                        struct ceph_pagelist *pagelist)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
        osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
                        struct bio *bio, size_t bio_length)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
        osd_data->bio = bio;
        osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

#define osd_req_op_data(oreq, whch, typ, fld)                           \
({                                                                      \
        struct ceph_osd_request *__oreq = (oreq);                       \
        unsigned int __whch = (whch);                                   \
        BUG_ON(__whch >= __oreq->r_num_ops);                            \
        &__oreq->r_ops[__whch].typ.fld;                                 \
})

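/*
 * osd_req_op_data() is a GCC statement-expression: it bounds-checks the
 * op index and then yields a pointer to the named data field, e.g.
 *
 *      osd_req_op_data(req, 0, extent, osd_data)
 *
 * evaluates to &req->r_ops[0].extent.osd_data.  The typ/fld arguments
 * are pasted as member names, which is why callers pass bare
 * identifiers such as "extent" and "osd_data".
 */
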
static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
        BUG_ON(which >= osd_req->r_num_ops);

        return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_raw_data_in(osd_req, which);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
                        unsigned int which, struct bio *bio, size_t bio_length)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bio_init(osd_data, bio, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

static void osd_req_op_cls_request_info_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_info);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
        osd_req->r_ops[which].cls.indata_len += pagelist->length;
        osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
        osd_req->r_ops[which].cls.indata_len += length;
        osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, response_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
        switch (osd_data->type) {
        case CEPH_OSD_DATA_TYPE_NONE:
                return 0;
        case CEPH_OSD_DATA_TYPE_PAGES:
                return osd_data->length;
        case CEPH_OSD_DATA_TYPE_PAGELIST:
                return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
        case CEPH_OSD_DATA_TYPE_BIO:
                return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
        default:
                WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
                return 0;
        }
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
                int num_pages;

                num_pages = calc_pages_for((u64)osd_data->alignment,
                                                (u64)osd_data->length);
                ceph_release_page_vector(osd_data->pages, num_pages);
        }
        ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];

        switch (op->op) {
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
                ceph_osd_data_release(&op->extent.osd_data);
                break;
        case CEPH_OSD_OP_CALL:
                ceph_osd_data_release(&op->cls.request_info);
                ceph_osd_data_release(&op->cls.request_data);
                ceph_osd_data_release(&op->cls.response_data);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                ceph_osd_data_release(&op->xattr.osd_data);
                break;
        case CEPH_OSD_OP_STAT:
                ceph_osd_data_release(&op->raw_data_in);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                ceph_osd_data_release(&op->notify_ack.request_data);
                break;
        case CEPH_OSD_OP_NOTIFY:
                ceph_osd_data_release(&op->notify.request_data);
                ceph_osd_data_release(&op->notify.response_data);
                break;
        case CEPH_OSD_OP_LIST_WATCHERS:
                ceph_osd_data_release(&op->list_watchers.response_data);
                break;
        default:
                break;
        }
}

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
        ceph_oid_init(&t->base_oid);
        ceph_oloc_init(&t->base_oloc);
        ceph_oid_init(&t->target_oid);
        ceph_oloc_init(&t->target_oloc);

        ceph_osds_init(&t->acting);
        ceph_osds_init(&t->up);
        t->size = -1;
        t->min_size = -1;

        t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
                        const struct ceph_osd_request_target *src)
{
        ceph_oid_copy(&dest->base_oid, &src->base_oid);
        ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
        ceph_oid_copy(&dest->target_oid, &src->target_oid);
        ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

        dest->pgid = src->pgid; /* struct */
        dest->pg_num = src->pg_num;
        dest->pg_num_mask = src->pg_num_mask;
        ceph_osds_copy(&dest->acting, &src->acting);
        ceph_osds_copy(&dest->up, &src->up);
        dest->size = src->size;
        dest->min_size = src->min_size;
        dest->sort_bitwise = src->sort_bitwise;

        dest->flags = src->flags;
        dest->paused = src->paused;

        dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
        ceph_oid_destroy(&t->base_oid);
        ceph_oloc_destroy(&t->base_oloc);
        ceph_oid_destroy(&t->target_oid);
        ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
        WARN_ON(!RB_EMPTY_NODE(&req->r_node));
        WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
        WARN_ON(!list_empty(&req->r_unsafe_item));
        WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
        struct ceph_osd_request *req = container_of(kref,
                                            struct ceph_osd_request, r_kref);
        unsigned int which;

        dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
             req->r_request, req->r_reply);
        request_release_checks(req);

        if (req->r_request)
                ceph_msg_put(req->r_request);
        if (req->r_reply)
                ceph_msg_put(req->r_reply);

        for (which = 0; which < req->r_num_ops; which++)
                osd_req_op_data_release(req, which);

        target_destroy(&req->r_t);
        ceph_put_snap_context(req->r_snapc);

        if (req->r_mempool)
                mempool_free(req, req->r_osdc->req_mempool);
        else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
                kmem_cache_free(ceph_osd_request_cache, req);
        else
                kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
        dout("%s %p (was %d)\n", __func__, req,
             atomic_read(&req->r_kref.refcount));
        kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
        if (req) {
                dout("%s %p (was %d)\n", __func__, req,
                     atomic_read(&req->r_kref.refcount));
                kref_put(&req->r_kref, ceph_osdc_release_request);
        }
}
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{
        /* req only, each op is zeroed in _osd_req_op_init() */
        memset(req, 0, sizeof(*req));

        kref_init(&req->r_kref);
        init_completion(&req->r_completion);
        init_completion(&req->r_safe_completion);
        RB_CLEAR_NODE(&req->r_node);
        RB_CLEAR_NODE(&req->r_mc_node);
        INIT_LIST_HEAD(&req->r_unsafe_item);

        target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        bool mempool = req->r_mempool;
        unsigned int num_ops = req->r_num_ops;
        u64 snapid = req->r_snapid;
        struct ceph_snap_context *snapc = req->r_snapc;
        bool linger = req->r_linger;
        struct ceph_msg *request_msg = req->r_request;
        struct ceph_msg *reply_msg = req->r_reply;

        dout("%s req %p\n", __func__, req);
        WARN_ON(atomic_read(&req->r_kref.refcount) != 1);
        request_release_checks(req);

        WARN_ON(atomic_read(&request_msg->kref.refcount) != 1);
        WARN_ON(atomic_read(&reply_msg->kref.refcount) != 1);
        target_destroy(&req->r_t);

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = snapid;
        req->r_snapc = snapc;
        req->r_linger = linger;
        req->r_request = request_msg;
        req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
                                               struct ceph_snap_context *snapc,
                                               unsigned int num_ops,
                                               bool use_mempool,
                                               gfp_t gfp_flags)
{
        struct ceph_osd_request *req;

        if (use_mempool) {
                BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
                req = mempool_alloc(osdc->req_mempool, gfp_flags);
        } else if (num_ops <= CEPH_OSD_SLAB_OPS) {
                req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
        } else {
                BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
                req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
                              gfp_flags);
        }
        if (unlikely(!req))
                return NULL;

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = use_mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = CEPH_NOSNAP;
        req->r_snapc = ceph_get_snap_context(snapc);

        dout("%s req %p\n", __func__, req);
        return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);

static int ceph_oloc_encoding_size(struct ceph_object_locator *oloc)
{
        return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}

int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        struct ceph_msg *msg;
        int msg_size;

        WARN_ON(ceph_oid_empty(&req->r_base_oid));
        WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

        /* create request message */
        msg_size = 4 + 4 + 4; /* client_inc, osdmap_epoch, flags */
        msg_size += 4 + 4 + 4 + 8; /* mtime, reassert_version */
        msg_size += CEPH_ENCODING_START_BLK_LEN +
                        ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
        msg_size += 1 + 8 + 4 + 4; /* pgid */
        msg_size += 4 + req->r_base_oid.name_len; /* oid */
        msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
        msg_size += 8; /* snapid */
        msg_size += 8; /* snap_seq */
        msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
        msg_size += 4; /* retry_attempt */

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
        if (!msg)
                return -ENOMEM;

        memset(msg->front.iov_base, 0, msg->front.iov_len);
        req->r_request = msg;

        /* create reply message */
        msg_size = OSD_OPREPLY_FRONT_LEN;
        msg_size += req->r_base_oid.name_len;
        msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
        if (!msg)
                return -ENOMEM;

        req->r_reply = msg;

        return 0;
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);

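/*
 * A minimal allocation sketch (hypothetical, not compiled): the base
 * oid/oloc must be filled in before ceph_osdc_alloc_messages(), since
 * the request message is sized from them, as the WARN_ONs above note.
 */
#if 0 /* usage sketch, not part of the driver */
static struct ceph_osd_request *alloc_example(struct ceph_osd_client *osdc,
                                              s64 pool_id)
{
        struct ceph_osd_request *req;

        req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
        if (!req)
                return NULL;

        req->r_base_oloc.pool = pool_id;
        ceph_oid_printf(&req->r_base_oid, "%s", "example-object");
        osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0);

        if (ceph_osdc_alloc_messages(req, GFP_NOFS)) {
                ceph_osdc_put_request(req);
                return NULL;
        }
        return req;
}
#endif
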
static bool osd_req_opcode_valid(u16 opcode)
{
        switch (opcode) {
#define GENERATE_CASE(op, opcode, str)  case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
        default:
                return false;
        }
}

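/*
 * __CEPH_FORALL_OSD_OPS() is an X-macro: it invokes GENERATE_CASE once
 * per (op, opcode, str) tuple in the opcode table, so the switch above
 * expands to one "case CEPH_OSD_OP_<op>: return true;" line for every
 * opcode libceph knows about, e.g.:
 *
 *      case CEPH_OSD_OP_READ: return true;
 *      case CEPH_OSD_OP_STAT: return true;
 *      ...
 */
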
/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
                 u16 opcode, u32 flags)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        BUG_ON(!osd_req_opcode_valid(opcode));

        op = &osd_req->r_ops[which];
        memset(op, 0, sizeof (*op));
        op->op = opcode;
        op->flags = flags;

        return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
                     unsigned int which, u16 opcode, u32 flags)
{
        (void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
                                unsigned int which, u16 opcode,
                                u64 offset, u64 length,
                                u64 truncate_size, u32 truncate_seq)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        size_t payload_len = 0;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
               opcode != CEPH_OSD_OP_TRUNCATE);

        op->extent.offset = offset;
        op->extent.length = length;
        op->extent.truncate_size = truncate_size;
        op->extent.truncate_seq = truncate_seq;
        if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
                payload_len += length;

        op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 length)
{
        struct ceph_osd_req_op *op;
        u64 previous;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];
        previous = op->extent.length;

        if (length == previous)
                return;         /* Nothing to do */
        BUG_ON(length > previous);

        op->extent.length = length;
        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 offset_inc)
{
        struct ceph_osd_req_op *op, *prev_op;

        BUG_ON(which + 1 >= osd_req->r_num_ops);

        prev_op = &osd_req->r_ops[which];
        op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
        /* dup previous one */
        op->indata_len = prev_op->indata_len;
        op->outdata_len = prev_op->outdata_len;
        op->extent = prev_op->extent;
        /* adjust offset */
        op->extent.offset += offset_inc;
        op->extent.length -= offset_inc;

        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);

void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
                        u16 opcode, const char *class, const char *method)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len = 0;
        size_t size;

        BUG_ON(opcode != CEPH_OSD_OP_CALL);

        pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
        BUG_ON(!pagelist);
        ceph_pagelist_init(pagelist);

        op->cls.class_name = class;
        size = strlen(class);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.class_len = size;
        ceph_pagelist_append(pagelist, class, size);
        payload_len += size;

        op->cls.method_name = method;
        size = strlen(method);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.method_len = size;
        ceph_pagelist_append(pagelist, method, size);
        payload_len += size;

        osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

        op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);

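/*
 * A hypothetical sketch (not compiled) of issuing an object class call:
 * CEPH_OSD_OP_CALL executes a method of a class plugin on the OSD.  The
 * class/method names here ("lock"/"lock") are illustrative only; any
 * input for the method would be attached afterwards with
 * osd_req_op_cls_request_data_pagelist() or ..._pages().
 */
#if 0 /* usage sketch, not part of the driver */
static void cls_call_example(struct ceph_osd_request *req)
{
        /* op 0: call method "lock" of object class "lock" */
        osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "lock", "lock");
}
#endif
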
int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
                          u16 opcode, const char *name, const void *value,
                          size_t size, u8 cmp_op, u8 cmp_mode)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len;

        BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

        pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
        if (!pagelist)
                return -ENOMEM;

        ceph_pagelist_init(pagelist);

        payload_len = strlen(name);
        op->xattr.name_len = payload_len;
        ceph_pagelist_append(pagelist, name, payload_len);

        op->xattr.value_len = size;
        ceph_pagelist_append(pagelist, value, size);
        payload_len += size;

        op->xattr.cmp_op = cmp_op;
        op->xattr.cmp_mode = cmp_mode;

        ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
        op->indata_len = payload_len;
        return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
                                  u64 cookie, u8 watch_opcode)
{
        struct ceph_osd_req_op *op;

        op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
        op->watch.cookie = cookie;
        op->watch.op = watch_opcode;
        op->watch.gen = 0;
}

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
                                unsigned int which,
                                u64 expected_object_size,
                                u64 expected_write_size)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      CEPH_OSD_OP_SETALLOCHINT,
                                                      0);

        op->alloc_hint.expected_object_size = expected_object_size;
        op->alloc_hint.expected_write_size = expected_write_size;

        /*
         * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
         * not worth a feature bit.  Set FAILOK per-op flag to make
         * sure older osds don't trip over an unsupported opcode.
         */
        op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
                                struct ceph_osd_data *osd_data)
{
        u64 length = ceph_osd_data_length(osd_data);

        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
                BUG_ON(length > (u64) SIZE_MAX);
                if (length)
                        ceph_msg_data_add_pages(msg, osd_data->pages,
                                        length, osd_data->alignment);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                BUG_ON(!length);
                ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
                ceph_msg_data_add_bio(msg, osd_data->bio, length);
#endif
        } else {
                BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
        }
}

static u32 osd_req_encode_op(struct ceph_osd_op *dst,
                             const struct ceph_osd_req_op *src)
{
        if (WARN_ON(!osd_req_opcode_valid(src->op))) {
                pr_err("unrecognized osd opcode %d\n", src->op);

                return 0;
        }

        switch (src->op) {
        case CEPH_OSD_OP_STAT:
                break;
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
        case CEPH_OSD_OP_ZERO:
        case CEPH_OSD_OP_TRUNCATE:
                dst->extent.offset = cpu_to_le64(src->extent.offset);
                dst->extent.length = cpu_to_le64(src->extent.length);
                dst->extent.truncate_size =
                        cpu_to_le64(src->extent.truncate_size);
                dst->extent.truncate_seq =
                        cpu_to_le32(src->extent.truncate_seq);
                break;
        case CEPH_OSD_OP_CALL:
                dst->cls.class_len = src->cls.class_len;
                dst->cls.method_len = src->cls.method_len;
                dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
                break;
        case CEPH_OSD_OP_STARTSYNC:
                break;
        case CEPH_OSD_OP_WATCH:
                dst->watch.cookie = cpu_to_le64(src->watch.cookie);
                dst->watch.ver = cpu_to_le64(0);
                dst->watch.op = src->watch.op;
                dst->watch.gen = cpu_to_le32(src->watch.gen);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                break;
        case CEPH_OSD_OP_NOTIFY:
                dst->notify.cookie = cpu_to_le64(src->notify.cookie);
                break;
        case CEPH_OSD_OP_LIST_WATCHERS:
                break;
        case CEPH_OSD_OP_SETALLOCHINT:
                dst->alloc_hint.expected_object_size =
                    cpu_to_le64(src->alloc_hint.expected_object_size);
                dst->alloc_hint.expected_write_size =
                    cpu_to_le64(src->alloc_hint.expected_write_size);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
                dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
                dst->xattr.cmp_op = src->xattr.cmp_op;
                dst->xattr.cmp_mode = src->xattr.cmp_mode;
                break;
        case CEPH_OSD_OP_CREATE:
        case CEPH_OSD_OP_DELETE:
                break;
        default:
                pr_err("unsupported osd opcode %s\n",
                        ceph_osd_op_name(src->op));
                WARN_ON(1);

                return 0;
        }

        dst->op = cpu_to_le16(src->op);
        dst->flags = cpu_to_le32(src->flags);
        dst->payload_len = cpu_to_le32(src->indata_len);

        return src->indata_len;
}

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                                               struct ceph_file_layout *layout,
                                               struct ceph_vino vino,
                                               u64 off, u64 *plen,
                                               unsigned int which, int num_ops,
                                               int opcode, int flags,
                                               struct ceph_snap_context *snapc,
                                               u32 truncate_seq,
                                               u64 truncate_size,
                                               bool use_mempool)
{
        struct ceph_osd_request *req;
        u64 objnum = 0;
        u64 objoff = 0;
        u64 objlen = 0;
        int r;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
               opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

        req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
                                        GFP_NOFS);
        if (!req) {
                r = -ENOMEM;
                goto fail;
        }

        /* calculate max write size */
        r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
        if (r)
                goto fail;

        if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
                osd_req_op_init(req, which, opcode, 0);
        } else {
                u32 object_size = layout->object_size;
                u32 object_base = off - objoff;
                if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
                        if (truncate_size <= object_base) {
                                truncate_size = 0;
                        } else {
                                truncate_size -= object_base;
                                if (truncate_size > object_size)
                                        truncate_size = object_size;
                        }
                }
                osd_req_op_extent_init(req, which, opcode, objoff, objlen,
                                       truncate_size, truncate_seq);
        }

        req->r_flags = flags;
        req->r_base_oloc.pool = layout->pool_id;
        req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
        ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

        req->r_snapid = vino.snap;
        if (flags & CEPH_OSD_FLAG_WRITE)
                req->r_data_offset = off;

        r = ceph_osdc_alloc_messages(req, GFP_NOFS);
        if (r)
                goto fail;

        return req;

fail:
        ceph_osdc_put_request(req);
        return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);

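/*
 * A hypothetical sketch (not compiled) of building a one-op file read
 * with the helper above.  On return *plen may have been trimmed to the
 * object boundary; attaching the data buffer and actually submitting
 * the request (done elsewhere via ceph_osdc_start_request()) are
 * omitted here.
 */
#if 0 /* usage sketch, not part of the driver */
static struct ceph_osd_request *read_example(struct ceph_osd_client *osdc,
                                             struct ceph_file_layout *layout,
                                             struct ceph_vino vino,
                                             u64 off, u64 *plen)
{
        /* one op at index 0, no snap context, default truncate state */
        return ceph_osdc_new_request(osdc, layout, vino, off, plen,
                                     0, 1, CEPH_OSD_OP_READ,
                                     CEPH_OSD_FLAG_READ, NULL,
                                     0, 0, false);
}
#endif
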
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)

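/*
 * DEFINE_RB_FUNCS(name, type, keyfld, nodefld) generates the static
 * helpers insert_<name>(), lookup_<name>() and erase_<name>() for an
 * rbtree of <type> keyed by <keyfld>.  The two lines above therefore
 * provide the insert/lookup/erase_request() helpers used throughout
 * this file, plus the _request_mc variants for the map-check tree.
 */
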
static bool osd_homeless(struct ceph_osd *osd)
{
        return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
        verify_osdc_locked(osd->o_osdc);

        return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
        atomic_set(&osd->o_ref, 1);
        RB_CLEAR_NODE(&osd->o_node);
        osd->o_requests = RB_ROOT;
        osd->o_linger_requests = RB_ROOT;
        INIT_LIST_HEAD(&osd->o_osd_lru);
        INIT_LIST_HEAD(&osd->o_keepalive_item);
        osd->o_incarnation = 1;
        mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
        WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
        WARN_ON(!list_empty(&osd->o_osd_lru));
        WARN_ON(!list_empty(&osd->o_keepalive_item));

        if (osd->o_auth.authorizer) {
                WARN_ON(osd_homeless(osd));
                ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
        }
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
        struct ceph_osd *osd;

        WARN_ON(onum == CEPH_HOMELESS_OSD);

        osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
        osd_init(osd);
        osd->o_osdc = osdc;
        osd->o_osd = onum;

        ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

        return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
        if (atomic_inc_not_zero(&osd->o_ref)) {
                dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
                     atomic_read(&osd->o_ref));
                return osd;
        } else {
                dout("get_osd %p FAIL\n", osd);
                return NULL;
        }
}

static void put_osd(struct ceph_osd *osd)
{
        dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
             atomic_read(&osd->o_ref) - 1);
        if (atomic_dec_and_test(&osd->o_ref)) {
                osd_cleanup(osd);
                kfree(osd);
        }
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
        BUG_ON(!list_empty(&osd->o_osd_lru));

        spin_lock(&osdc->osd_lru_lock);
        list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
        spin_unlock(&osdc->osd_lru_lock);

        osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
        if (RB_EMPTY_ROOT(&osd->o_requests) &&
            RB_EMPTY_ROOT(&osd->o_linger_requests))
                __move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        spin_lock(&osdc->osd_lru_lock);
        if (!list_empty(&osd->o_osd_lru))
                list_del_init(&osd->o_osd_lru);
        spin_unlock(&osdc->osd_lru_lock);
}

/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;
        struct rb_node *n;

        verify_osdc_wrlocked(osdc);
        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        ceph_con_close(&osd->o_con);

        for (n = rb_first(&osd->o_requests); n; ) {
                struct ceph_osd_request *req =
                    rb_entry(n, struct ceph_osd_request, r_node);

                n = rb_next(n); /* unlink_request() */

                dout(" reassigning req %p tid %llu\n", req, req->r_tid);
                unlink_request(osd, req);
                link_request(&osdc->homeless_osd, req);
        }
        for (n = rb_first(&osd->o_linger_requests); n; ) {
                struct ceph_osd_linger_request *lreq =
                    rb_entry(n, struct ceph_osd_linger_request, node);

                n = rb_next(n); /* unlink_linger() */

                dout(" reassigning lreq %p linger_id %llu\n", lreq,
                     lreq->linger_id);
                unlink_linger(osd, lreq);
                link_linger(&osdc->homeless_osd, lreq);
        }

        __remove_osd_from_lru(osd);
        erase_osd(&osdc->osds, osd);
        put_osd(osd);
}

/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{
        struct ceph_entity_addr *peer_addr;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        if (RB_EMPTY_ROOT(&osd->o_requests) &&
            RB_EMPTY_ROOT(&osd->o_linger_requests)) {
                close_osd(osd);
                return -ENODEV;
        }

        peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
        if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
                        !ceph_con_opened(&osd->o_con)) {
                struct rb_node *n;

                dout("osd addr hasn't changed and connection never opened, "
                     "letting msgr retry\n");
                /* touch each r_stamp for handle_timeout()'s benefit */
                for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
                        struct ceph_osd_request *req =
                            rb_entry(n, struct ceph_osd_request, r_node);
                        req->r_stamp = jiffies;
                }

                return -EAGAIN;
        }

        ceph_con_close(&osd->o_con);
        ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
        osd->o_incarnation++;

        return 0;
}

static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
                                          bool wrlocked)
{
        struct ceph_osd *osd;

        if (wrlocked)
                verify_osdc_wrlocked(osdc);
        else
                verify_osdc_locked(osdc);

        if (o != CEPH_HOMELESS_OSD)
                osd = lookup_osd(&osdc->osds, o);
        else
                osd = &osdc->homeless_osd;
        if (!osd) {
                if (!wrlocked)
                        return ERR_PTR(-EAGAIN);

                osd = create_osd(osdc, o);
                insert_osd(&osdc->osds, osd);
                ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
                              &osdc->osdmap->osd_addr[osd->o_osd]);
        }

        dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
        return osd;
}

/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
        verify_osd_locked(osd);
        WARN_ON(!req->r_tid || req->r_osd);
        dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
             req, req->r_tid);

        if (!osd_homeless(osd))
                __remove_osd_from_lru(osd);
        else
                atomic_inc(&osd->o_osdc->num_homeless);

        get_osd(osd);
        insert_request(&osd->o_requests, req);
        req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
        verify_osd_locked(osd);
        WARN_ON(req->r_osd != osd);
        dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
             req, req->r_tid);

        req->r_osd = NULL;
        erase_request(&osd->o_requests, req);
        put_osd(osd);

        if (!osd_homeless(osd))
                maybe_move_osd_to_lru(osd);
        else
                atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __pool_full(struct ceph_pg_pool_info *pi)
{
        return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
        struct rb_node *n;

        for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
                struct ceph_pg_pool_info *pi =
                    rb_entry(n, struct ceph_pg_pool_info, node);

                if (__pool_full(pi))
                        return true;
        }

        return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
        struct ceph_pg_pool_info *pi;

        pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
        if (!pi)
                return false;

        return __pool_full(pi);
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
                                    const struct ceph_osd_request_target *t,
                                    struct ceph_pg_pool_info *pi)
{
        bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
        bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
                       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                       __pool_full(pi);

        WARN_ON(pi->id != t->base_oloc.pool);
        return (t->flags & CEPH_OSD_FLAG_READ && pauserd) ||
               (t->flags & CEPH_OSD_FLAG_WRITE && pausewr);
}

enum calc_target_result {
        CALC_TARGET_NO_ACTION = 0,
        CALC_TARGET_NEED_RESEND,
        CALC_TARGET_POOL_DNE,
};

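/*
 * Map a request target to a concrete (PG, primary OSD) pair against the
 * current osdmap: resolve cache tiering, hash the object name to a PG,
 * and compare the resulting up/acting sets with what the target last
 * saw.  Returns CALC_TARGET_NO_ACTION if nothing changed,
 * CALC_TARGET_NEED_RESEND if the request must be (re)sent to a new
 * target, or CALC_TARGET_POOL_DNE if the pool doesn't exist in the map.
 */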
1311 static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1312                                            struct ceph_osd_request_target *t,
1313                                            u32 *last_force_resend,
1314                                            bool any_change)
1315 {
1316         struct ceph_pg_pool_info *pi;
1317         struct ceph_pg pgid, last_pgid;
1318         struct ceph_osds up, acting;
1319         bool force_resend = false;
1320         bool need_check_tiering = false;
1321         bool need_resend = false;
1322         bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1323         enum calc_target_result ct_res;
1324         int ret;
1325
1326         pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
1327         if (!pi) {
1328                 t->osd = CEPH_HOMELESS_OSD;
1329                 ct_res = CALC_TARGET_POOL_DNE;
1330                 goto out;
1331         }
1332
1333         if (osdc->osdmap->epoch == pi->last_force_request_resend) {
1334                 if (last_force_resend &&
1335                     *last_force_resend < pi->last_force_request_resend) {
1336                         *last_force_resend = pi->last_force_request_resend;
1337                         force_resend = true;
1338                 } else if (!last_force_resend) {
1339                         force_resend = true;
1340                 }
1341         }
1342         if (ceph_oid_empty(&t->target_oid) || force_resend) {
1343                 ceph_oid_copy(&t->target_oid, &t->base_oid);
1344                 need_check_tiering = true;
1345         }
1346         if (ceph_oloc_empty(&t->target_oloc) || force_resend) {
1347                 ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
1348                 need_check_tiering = true;
1349         }
1350
1351         if (need_check_tiering &&
1352             (t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
1353                 if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
1354                         t->target_oloc.pool = pi->read_tier;
1355                 if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
1356                         t->target_oloc.pool = pi->write_tier;
1357         }
1358
1359         ret = ceph_object_locator_to_pg(osdc->osdmap, &t->target_oid,
1360                                         &t->target_oloc, &pgid);
1361         if (ret) {
1362                 WARN_ON(ret != -ENOENT);
1363                 t->osd = CEPH_HOMELESS_OSD;
1364                 ct_res = CALC_TARGET_POOL_DNE;
1365                 goto out;
1366         }
1367         last_pgid.pool = pgid.pool;
1368         last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);
1369
1370         ceph_pg_to_up_acting_osds(osdc->osdmap, &pgid, &up, &acting);
1371         if (any_change &&
1372             ceph_is_new_interval(&t->acting,
1373                                  &acting,
1374                                  &t->up,
1375                                  &up,
1376                                  t->size,
1377                                  pi->size,
1378                                  t->min_size,
1379                                  pi->min_size,
1380                                  t->pg_num,
1381                                  pi->pg_num,
1382                                  t->sort_bitwise,
1383                                  sort_bitwise,
1384                                  &last_pgid))
1385                 force_resend = true;
1386
1387         if (t->paused && !target_should_be_paused(osdc, t, pi)) {
1388                 t->paused = false;
1389                 need_resend = true;
1390         }
1391
1392         if (ceph_pg_compare(&t->pgid, &pgid) ||
1393             ceph_osds_changed(&t->acting, &acting, any_change) ||
1394             force_resend) {
1395                 t->pgid = pgid; /* struct */
1396                 ceph_osds_copy(&t->acting, &acting);
1397                 ceph_osds_copy(&t->up, &up);
1398                 t->size = pi->size;
1399                 t->min_size = pi->min_size;
1400                 t->pg_num = pi->pg_num;
1401                 t->pg_num_mask = pi->pg_num_mask;
1402                 t->sort_bitwise = sort_bitwise;
1403
1404                 t->osd = acting.primary;
1405                 need_resend = true;
1406         }
1407
1408         ct_res = need_resend ? CALC_TARGET_NEED_RESEND : CALC_TARGET_NO_ACTION;
1409 out:
1410         dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
1411         return ct_res;
1412 }
1413
1414 static void setup_request_data(struct ceph_osd_request *req,
1415                                struct ceph_msg *msg)
1416 {
1417         u32 data_len = 0;
1418         int i;
1419
1420         if (!list_empty(&msg->data))
1421                 return;
1422
1423         WARN_ON(msg->data_length);
1424         for (i = 0; i < req->r_num_ops; i++) {
1425                 struct ceph_osd_req_op *op = &req->r_ops[i];
1426
1427                 switch (op->op) {
1428                 /* request */
1429                 case CEPH_OSD_OP_WRITE:
1430                 case CEPH_OSD_OP_WRITEFULL:
1431                         WARN_ON(op->indata_len != op->extent.length);
1432                         ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
1433                         break;
1434                 case CEPH_OSD_OP_SETXATTR:
1435                 case CEPH_OSD_OP_CMPXATTR:
1436                         WARN_ON(op->indata_len != op->xattr.name_len +
1437                                                   op->xattr.value_len);
1438                         ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
1439                         break;
1440                 case CEPH_OSD_OP_NOTIFY_ACK:
1441                         ceph_osdc_msg_data_add(msg,
1442                                                &op->notify_ack.request_data);
1443                         break;
1444
1445                 /* reply */
1446                 case CEPH_OSD_OP_STAT:
1447                         ceph_osdc_msg_data_add(req->r_reply,
1448                                                &op->raw_data_in);
1449                         break;
1450                 case CEPH_OSD_OP_READ:
1451                         ceph_osdc_msg_data_add(req->r_reply,
1452                                                &op->extent.osd_data);
1453                         break;
1454                 case CEPH_OSD_OP_LIST_WATCHERS:
1455                         ceph_osdc_msg_data_add(req->r_reply,
1456                                                &op->list_watchers.response_data);
1457                         break;
1458
1459                 /* both */
1460                 case CEPH_OSD_OP_CALL:
1461                         WARN_ON(op->indata_len != op->cls.class_len +
1462                                                   op->cls.method_len +
1463                                                   op->cls.indata_len);
1464                         ceph_osdc_msg_data_add(msg, &op->cls.request_info);
1465                         /* optional, can be NONE */
1466                         ceph_osdc_msg_data_add(msg, &op->cls.request_data);
1467                         /* optional, can be NONE */
1468                         ceph_osdc_msg_data_add(req->r_reply,
1469                                                &op->cls.response_data);
1470                         break;
1471                 case CEPH_OSD_OP_NOTIFY:
1472                         ceph_osdc_msg_data_add(msg,
1473                                                &op->notify.request_data);
1474                         ceph_osdc_msg_data_add(req->r_reply,
1475                                                &op->notify.response_data);
1476                         break;
1477                 }
1478
1479                 data_len += op->indata_len;
1480         }
1481
1482         WARN_ON(data_len != msg->data_length);
1483 }
1484
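/*
 * Encode @req into @msg's front following the MOSDOp v4 layout that
 * the code below emits, in order:
 *
 *	client_inc, osdmap epoch, flags, mtime, reassert_version,
 *	object_locator (pool, key, namespace), pgid, oid, ops[],
 *	snapid, snap context, retry_attempt
 */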
1485 static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
1486 {
1487         void *p = msg->front.iov_base;
1488         void *const end = p + msg->front_alloc_len;
1489         u32 data_len = 0;
1490         int i;
1491
1492         if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
1493                 /* snapshots aren't writeable */
1494                 WARN_ON(req->r_snapid != CEPH_NOSNAP);
1495         } else {
1496                 WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
1497                         req->r_data_offset || req->r_snapc);
1498         }
1499
1500         setup_request_data(req, msg);
1501
1502         ceph_encode_32(&p, 1); /* client_inc, always 1 */
1503         ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
1504         ceph_encode_32(&p, req->r_flags);
1505         ceph_encode_timespec(p, &req->r_mtime);
1506         p += sizeof(struct ceph_timespec);
1507         /* aka reassert_version */
1508         memcpy(p, &req->r_replay_version, sizeof(req->r_replay_version));
1509         p += sizeof(req->r_replay_version);
1510
1511         /* oloc */
1512         ceph_start_encoding(&p, 5, 4,
1513                             ceph_oloc_encoding_size(&req->r_t.target_oloc));
1514         ceph_encode_64(&p, req->r_t.target_oloc.pool);
1515         ceph_encode_32(&p, -1); /* preferred */
1516         ceph_encode_32(&p, 0); /* key len */
1517         if (req->r_t.target_oloc.pool_ns)
1518                 ceph_encode_string(&p, end, req->r_t.target_oloc.pool_ns->str,
1519                                    req->r_t.target_oloc.pool_ns->len);
1520         else
1521                 ceph_encode_32(&p, 0);
1522
1523         /* pgid */
1524         ceph_encode_8(&p, 1);
1525         ceph_encode_64(&p, req->r_t.pgid.pool);
1526         ceph_encode_32(&p, req->r_t.pgid.seed);
1527         ceph_encode_32(&p, -1); /* preferred */
1528
1529         /* oid */
1530         ceph_encode_32(&p, req->r_t.target_oid.name_len);
1531         memcpy(p, req->r_t.target_oid.name, req->r_t.target_oid.name_len);
1532         p += req->r_t.target_oid.name_len;
1533
1534         /* ops, can imply data */
1535         ceph_encode_16(&p, req->r_num_ops);
1536         for (i = 0; i < req->r_num_ops; i++) {
1537                 data_len += osd_req_encode_op(p, &req->r_ops[i]);
1538                 p += sizeof(struct ceph_osd_op);
1539         }
1540
1541         ceph_encode_64(&p, req->r_snapid); /* snapid */
1542         if (req->r_snapc) {
1543                 ceph_encode_64(&p, req->r_snapc->seq);
1544                 ceph_encode_32(&p, req->r_snapc->num_snaps);
1545                 for (i = 0; i < req->r_snapc->num_snaps; i++)
1546                         ceph_encode_64(&p, req->r_snapc->snaps[i]);
1547         } else {
1548                 ceph_encode_64(&p, 0); /* snap_seq */
1549                 ceph_encode_32(&p, 0); /* snaps len */
1550         }
1551
1552         ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
1553
1554         BUG_ON(p > end);
1555         msg->front.iov_len = p - msg->front.iov_base;
1556         msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
1557         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1558         msg->hdr.data_len = cpu_to_le32(data_len);
1559         /*
1560          * The header "data_off" is a hint to the receiver allowing it
1561          * to align received data into its buffers such that there's no
1562          * need to re-copy it before writing it to disk (direct I/O).
1563          */
1564         msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
1565
1566         dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__,
1567              req, req->r_t.target_oid.name, req->r_t.target_oid.name_len,
1568              msg->front.iov_len, data_len);
1569 }
1570
1571 /*
1572  * @req has to be assigned a tid and registered.
1573  */
1574 static void send_request(struct ceph_osd_request *req)
1575 {
1576         struct ceph_osd *osd = req->r_osd;
1577
1578         verify_osd_locked(osd);
1579         WARN_ON(osd->o_osd != req->r_t.osd);
1580
1581         /*
1582          * We may have a previously queued request message hanging
1583          * around.  Cancel it to avoid corrupting the msgr.
1584          */
1585         if (req->r_sent)
1586                 ceph_msg_revoke(req->r_request);
1587
1588         req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
1589         if (req->r_attempts)
1590                 req->r_flags |= CEPH_OSD_FLAG_RETRY;
1591         else
1592                 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
1593
1594         encode_request(req, req->r_request);
1595
1596         dout("%s req %p tid %llu to pg %llu.%x osd%d flags 0x%x attempt %d\n",
1597              __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
1598              req->r_t.osd, req->r_flags, req->r_attempts);
1599
1600         req->r_t.paused = false;
1601         req->r_stamp = jiffies;
1602         req->r_attempts++;
1603
1604         req->r_sent = osd->o_incarnation;
1605         req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
1606         ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
1607 }
1608
1609 static void maybe_request_map(struct ceph_osd_client *osdc)
1610 {
1611         bool continuous = false;
1612
1613         verify_osdc_locked(osdc);
1614         WARN_ON(!osdc->osdmap->epoch);
1615
1616         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1617             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
1618             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
1619                 dout("%s osdc %p continuous\n", __func__, osdc);
1620                 continuous = true;
1621         } else {
1622                 dout("%s osdc %p onetime\n", __func__, osdc);
1623         }
1624
1625         if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
1626                                osdc->osdmap->epoch + 1, continuous))
1627                 ceph_monc_renew_subs(&osdc->client->monc);
1628 }
1629
1630 static void send_map_check(struct ceph_osd_request *req);
1631
1632 static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
1633 {
1634         struct ceph_osd_client *osdc = req->r_osdc;
1635         struct ceph_osd *osd;
1636         enum calc_target_result ct_res;
1637         bool need_send = false;
1638         bool promoted = false;
1639
1640         WARN_ON(req->r_tid || req->r_got_reply);
1641         dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
1642
1643 again:
1644         ct_res = calc_target(osdc, &req->r_t, &req->r_last_force_resend, false);
1645         if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
1646                 goto promote;
1647
1648         osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
1649         if (IS_ERR(osd)) {
1650                 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
1651                 goto promote;
1652         }
1653
1654         if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
1655             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
1656                 dout("req %p pausewr\n", req);
1657                 req->r_t.paused = true;
1658                 maybe_request_map(osdc);
1659         } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
1660                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
1661                 dout("req %p pauserd\n", req);
1662                 req->r_t.paused = true;
1663                 maybe_request_map(osdc);
1664         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
1665                    !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
1666                                      CEPH_OSD_FLAG_FULL_FORCE)) &&
1667                    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1668                     pool_full(osdc, req->r_t.base_oloc.pool))) {
1669                 dout("req %p full/pool_full\n", req);
1670                 pr_warn_ratelimited("FULL or reached pool quota\n");
1671                 req->r_t.paused = true;
1672                 maybe_request_map(osdc);
1673         } else if (!osd_homeless(osd)) {
1674                 need_send = true;
1675         } else {
1676                 maybe_request_map(osdc);
1677         }
1678
1679         mutex_lock(&osd->lock);
1680         /*
1681          * Assign the tid atomically with send_request() to protect
1682          * multiple writes to the same object from racing with each
1683          * other, resulting in out of order ops on the OSDs.
1684          */
1685         req->r_tid = atomic64_inc_return(&osdc->last_tid);
1686         link_request(osd, req);
1687         if (need_send)
1688                 send_request(req);
1689         mutex_unlock(&osd->lock);
1690
1691         if (ct_res == CALC_TARGET_POOL_DNE)
1692                 send_map_check(req);
1693
1694         if (promoted)
1695                 downgrade_write(&osdc->lock);
1696         return;
1697
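        /*
         * Promote osdc->lock from read to write.  The osdmap may
         * change while the semaphore is dropped, which is why we
         * "goto again" and recompute the target with calc_target().
         */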
1698 promote:
1699         up_read(&osdc->lock);
1700         down_write(&osdc->lock);
1701         wrlocked = true;
1702         promoted = true;
1703         goto again;
1704 }
1705
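/*
 * Reads complete on ACK, so mark them with CEPH_OSD_FLAG_ACK here;
 * writes must already carry ACK and/or ONDISK (commit).  Also bump
 * the per-client count of in-flight requests.
 */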
1706 static void account_request(struct ceph_osd_request *req)
1707 {
1708         unsigned int mask = CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK;
1709
1710         if (req->r_flags & CEPH_OSD_FLAG_READ) {
1711                 WARN_ON(req->r_flags & mask);
1712                 req->r_flags |= CEPH_OSD_FLAG_ACK;
1713         } else if (req->r_flags & CEPH_OSD_FLAG_WRITE)
1714                 WARN_ON(!(req->r_flags & mask));
1715         else
1716                 WARN_ON(1);
1717
1718         WARN_ON(req->r_unsafe_callback && (req->r_flags & mask) != mask);
1719         atomic_inc(&req->r_osdc->num_requests);
1720 }
1721
1722 static void submit_request(struct ceph_osd_request *req, bool wrlocked)
1723 {
1724         ceph_osdc_get_request(req);
1725         account_request(req);
1726         __submit_request(req, wrlocked);
1727 }
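
/*
 * A minimal usage sketch (hypothetical caller; compare
 * ceph_osdc_start_request() later in this file):
 *
 *	down_read(&osdc->lock);
 *	submit_request(req, false);
 *	up_read(&osdc->lock);
 *
 * submit_request() takes its own reference on @req;
 * __submit_request() may temporarily promote osdc->lock to write
 * mode if the pool does not exist or an OSD session must be created.
 */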
1728
1729 static void __finish_request(struct ceph_osd_request *req)
1730 {
1731         struct ceph_osd_client *osdc = req->r_osdc;
1732         struct ceph_osd *osd = req->r_osd;
1733
1734         verify_osd_locked(osd);
1735         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
1736
1737         WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
1738         unlink_request(osd, req);
1739         atomic_dec(&osdc->num_requests);
1740
1741         /*
1742          * If an OSD has failed or come back up and a request has been
1743          * sent twice, it's possible to get a reply and end up here while
1744          * the request message is still queued for delivery.  We will
1745          * ignore the reply, so not a big deal, but better to try and catch it.
1746          */
1747         ceph_msg_revoke(req->r_request);
1748         ceph_msg_revoke_incoming(req->r_reply);
1749 }
1750
1751 static void finish_request(struct ceph_osd_request *req)
1752 {
1753         __finish_request(req);
1754         ceph_osdc_put_request(req);
1755 }
1756
1757 static void __complete_request(struct ceph_osd_request *req)
1758 {
1759         if (req->r_callback)
1760                 req->r_callback(req);
1761         else
1762                 complete_all(&req->r_completion);
1763 }
1764
1765 /*
1766  * Note that this is open-coded in handle_reply(), which has to deal
1767  * with ack vs commit, dup acks, etc.
1768  */
1769 static void complete_request(struct ceph_osd_request *req, int err)
1770 {
1771         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
1772
1773         req->r_result = err;
1774         __finish_request(req);
1775         __complete_request(req);
1776         complete_all(&req->r_safe_completion);
1777         ceph_osdc_put_request(req);
1778 }
1779
1780 static void cancel_map_check(struct ceph_osd_request *req)
1781 {
1782         struct ceph_osd_client *osdc = req->r_osdc;
1783         struct ceph_osd_request *lookup_req;
1784
1785         verify_osdc_wrlocked(osdc);
1786
1787         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
1788         if (!lookup_req)
1789                 return;
1790
1791         WARN_ON(lookup_req != req);
1792         erase_request_mc(&osdc->map_checks, req);
1793         ceph_osdc_put_request(req);
1794 }
1795
1796 static void cancel_request(struct ceph_osd_request *req)
1797 {
1798         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
1799
1800         cancel_map_check(req);
1801         finish_request(req);
1802 }
1803
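/*
 * Pool-existence (DNE) checks: if calc_target() reports that the
 * target pool is missing from our osdmap, send_map_check() asks the
 * monitor for the newest map epoch.  Once we have a map at least
 * that new and the pool is still absent, the pool was really
 * deleted and the request is completed with -ENOENT.
 */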
1804 static void check_pool_dne(struct ceph_osd_request *req)
1805 {
1806         struct ceph_osd_client *osdc = req->r_osdc;
1807         struct ceph_osdmap *map = osdc->osdmap;
1808
1809         verify_osdc_wrlocked(osdc);
1810         WARN_ON(!map->epoch);
1811
1812         if (req->r_attempts) {
1813                 /*
1814                  * We sent a request earlier, which means that
1815                  * previously the pool existed, and now it does not
1816                  * (i.e., it was deleted).
1817                  */
1818                 req->r_map_dne_bound = map->epoch;
1819                 dout("%s req %p tid %llu pool disappeared\n", __func__, req,
1820                      req->r_tid);
1821         } else {
1822                 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
1823                      req, req->r_tid, req->r_map_dne_bound, map->epoch);
1824         }
1825
1826         if (req->r_map_dne_bound) {
1827                 if (map->epoch >= req->r_map_dne_bound) {
1828                         /* we had a new enough map */
1829                         pr_info_ratelimited("tid %llu pool does not exist\n",
1830                                             req->r_tid);
1831                         complete_request(req, -ENOENT);
1832                 }
1833         } else {
1834                 send_map_check(req);
1835         }
1836 }
1837
1838 static void map_check_cb(struct ceph_mon_generic_request *greq)
1839 {
1840         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
1841         struct ceph_osd_request *req;
1842         u64 tid = greq->private_data;
1843
1844         WARN_ON(greq->result || !greq->u.newest);
1845
1846         down_write(&osdc->lock);
1847         req = lookup_request_mc(&osdc->map_checks, tid);
1848         if (!req) {
1849                 dout("%s tid %llu dne\n", __func__, tid);
1850                 goto out_unlock;
1851         }
1852
1853         dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
1854              req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
1855         if (!req->r_map_dne_bound)
1856                 req->r_map_dne_bound = greq->u.newest;
1857         erase_request_mc(&osdc->map_checks, req);
1858         check_pool_dne(req);
1859
1860         ceph_osdc_put_request(req);
1861 out_unlock:
1862         up_write(&osdc->lock);
1863 }
1864
1865 static void send_map_check(struct ceph_osd_request *req)
1866 {
1867         struct ceph_osd_client *osdc = req->r_osdc;
1868         struct ceph_osd_request *lookup_req;
1869         int ret;
1870
1871         verify_osdc_wrlocked(osdc);
1872
1873         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
1874         if (lookup_req) {
1875                 WARN_ON(lookup_req != req);
1876                 return;
1877         }
1878
1879         ceph_osdc_get_request(req);
1880         insert_request_mc(&osdc->map_checks, req);
1881         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
1882                                           map_check_cb, req->r_tid);
1883         WARN_ON(ret);
1884 }
1885
1886 /*
1887  * lingering requests, watch/notify v2 infrastructure
1888  */
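/*
 * Lifecycle sketch, derived from the code below: an lreq is created
 * with linger_alloc() and registered with linger_register(); then
 * linger_submit() maps it to an OSD and send_linger() issues the
 * CEPH_OSD_OP_WATCH (or CEPH_OSD_OP_NOTIFY) op.  Committed watches
 * are kept alive with CEPH_OSD_WATCH_OP_PING from the timeout
 * handler and re-established with CEPH_OSD_WATCH_OP_RECONNECT on
 * resend; errors reach the user through queue_watch_error().
 */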
1889 static void linger_release(struct kref *kref)
1890 {
1891         struct ceph_osd_linger_request *lreq =
1892             container_of(kref, struct ceph_osd_linger_request, kref);
1893
1894         dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
1895              lreq->reg_req, lreq->ping_req);
1896         WARN_ON(!RB_EMPTY_NODE(&lreq->node));
1897         WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
1898         WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
1899         WARN_ON(!list_empty(&lreq->scan_item));
1900         WARN_ON(!list_empty(&lreq->pending_lworks));
1901         WARN_ON(lreq->osd);
1902
1903         if (lreq->reg_req)
1904                 ceph_osdc_put_request(lreq->reg_req);
1905         if (lreq->ping_req)
1906                 ceph_osdc_put_request(lreq->ping_req);
1907         target_destroy(&lreq->t);
1908         kfree(lreq);
1909 }
1910
1911 static void linger_put(struct ceph_osd_linger_request *lreq)
1912 {
1913         if (lreq)
1914                 kref_put(&lreq->kref, linger_release);
1915 }
1916
1917 static struct ceph_osd_linger_request *
1918 linger_get(struct ceph_osd_linger_request *lreq)
1919 {
1920         kref_get(&lreq->kref);
1921         return lreq;
1922 }
1923
1924 static struct ceph_osd_linger_request *
1925 linger_alloc(struct ceph_osd_client *osdc)
1926 {
1927         struct ceph_osd_linger_request *lreq;
1928
1929         lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
1930         if (!lreq)
1931                 return NULL;
1932
1933         kref_init(&lreq->kref);
1934         mutex_init(&lreq->lock);
1935         RB_CLEAR_NODE(&lreq->node);
1936         RB_CLEAR_NODE(&lreq->osdc_node);
1937         RB_CLEAR_NODE(&lreq->mc_node);
1938         INIT_LIST_HEAD(&lreq->scan_item);
1939         INIT_LIST_HEAD(&lreq->pending_lworks);
1940         init_completion(&lreq->reg_commit_wait);
1941         init_completion(&lreq->notify_finish_wait);
1942
1943         lreq->osdc = osdc;
1944         target_init(&lreq->t);
1945
1946         dout("%s lreq %p\n", __func__, lreq);
1947         return lreq;
1948 }
1949
1950 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
1951 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
1952 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
1953
1954 /*
1955  * Create linger request <-> OSD session relation.
1956  *
1957  * @lreq has to be registered, @osd may be homeless.
1958  */
1959 static void link_linger(struct ceph_osd *osd,
1960                         struct ceph_osd_linger_request *lreq)
1961 {
1962         verify_osd_locked(osd);
1963         WARN_ON(!lreq->linger_id || lreq->osd);
1964         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
1965              osd->o_osd, lreq, lreq->linger_id);
1966
1967         if (!osd_homeless(osd))
1968                 __remove_osd_from_lru(osd);
1969         else
1970                 atomic_inc(&osd->o_osdc->num_homeless);
1971
1972         get_osd(osd);
1973         insert_linger(&osd->o_linger_requests, lreq);
1974         lreq->osd = osd;
1975 }
1976
1977 static void unlink_linger(struct ceph_osd *osd,
1978                           struct ceph_osd_linger_request *lreq)
1979 {
1980         verify_osd_locked(osd);
1981         WARN_ON(lreq->osd != osd);
1982         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
1983              osd->o_osd, lreq, lreq->linger_id);
1984
1985         lreq->osd = NULL;
1986         erase_linger(&osd->o_linger_requests, lreq);
1987         put_osd(osd);
1988
1989         if (!osd_homeless(osd))
1990                 maybe_move_osd_to_lru(osd);
1991         else
1992                 atomic_dec(&osd->o_osdc->num_homeless);
1993 }
1994
1995 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
1996 {
1997         verify_osdc_locked(lreq->osdc);
1998
1999         return !RB_EMPTY_NODE(&lreq->osdc_node);
2000 }
2001
2002 static bool linger_registered(struct ceph_osd_linger_request *lreq)
2003 {
2004         struct ceph_osd_client *osdc = lreq->osdc;
2005         bool registered;
2006
2007         down_read(&osdc->lock);
2008         registered = __linger_registered(lreq);
2009         up_read(&osdc->lock);
2010
2011         return registered;
2012 }
2013
2014 static void linger_register(struct ceph_osd_linger_request *lreq)
2015 {
2016         struct ceph_osd_client *osdc = lreq->osdc;
2017
2018         verify_osdc_wrlocked(osdc);
2019         WARN_ON(lreq->linger_id);
2020
2021         linger_get(lreq);
2022         lreq->linger_id = ++osdc->last_linger_id;
2023         insert_linger_osdc(&osdc->linger_requests, lreq);
2024 }
2025
2026 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2027 {
2028         struct ceph_osd_client *osdc = lreq->osdc;
2029
2030         verify_osdc_wrlocked(osdc);
2031
2032         erase_linger_osdc(&osdc->linger_requests, lreq);
2033         linger_put(lreq);
2034 }
2035
2036 static void cancel_linger_request(struct ceph_osd_request *req)
2037 {
2038         struct ceph_osd_linger_request *lreq = req->r_priv;
2039
2040         WARN_ON(!req->r_linger);
2041         cancel_request(req);
2042         linger_put(lreq);
2043 }
2044
2045 struct linger_work {
2046         struct work_struct work;
2047         struct ceph_osd_linger_request *lreq;
2048         struct list_head pending_item;
2049         unsigned long queued_stamp;
2050
2051         union {
2052                 struct {
2053                         u64 notify_id;
2054                         u64 notifier_id;
2055                         void *payload; /* points into @msg front */
2056                         size_t payload_len;
2057
2058                         struct ceph_msg *msg; /* for ceph_msg_put() */
2059                 } notify;
2060                 struct {
2061                         int err;
2062                 } error;
2063         };
2064 };
2065
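/*
 * A linger_work item ferries a watch notification or error from the
 * reply path to process context: it is queued on osdc->notify_wq by
 * lwork_queue() and freed by the work function via lwork_free().
 */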
2066 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2067                                        work_func_t workfn)
2068 {
2069         struct linger_work *lwork;
2070
2071         lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2072         if (!lwork)
2073                 return NULL;
2074
2075         INIT_WORK(&lwork->work, workfn);
2076         INIT_LIST_HEAD(&lwork->pending_item);
2077         lwork->lreq = linger_get(lreq);
2078
2079         return lwork;
2080 }
2081
2082 static void lwork_free(struct linger_work *lwork)
2083 {
2084         struct ceph_osd_linger_request *lreq = lwork->lreq;
2085
2086         mutex_lock(&lreq->lock);
2087         list_del(&lwork->pending_item);
2088         mutex_unlock(&lreq->lock);
2089
2090         linger_put(lreq);
2091         kfree(lwork);
2092 }
2093
2094 static void lwork_queue(struct linger_work *lwork)
2095 {
2096         struct ceph_osd_linger_request *lreq = lwork->lreq;
2097         struct ceph_osd_client *osdc = lreq->osdc;
2098
2099         verify_lreq_locked(lreq);
2100         WARN_ON(!list_empty(&lwork->pending_item));
2101
2102         lwork->queued_stamp = jiffies;
2103         list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2104         queue_work(osdc->notify_wq, &lwork->work);
2105 }
2106
2107 static void do_watch_notify(struct work_struct *w)
2108 {
2109         struct linger_work *lwork = container_of(w, struct linger_work, work);
2110         struct ceph_osd_linger_request *lreq = lwork->lreq;
2111
2112         if (!linger_registered(lreq)) {
2113                 dout("%s lreq %p not registered\n", __func__, lreq);
2114                 goto out;
2115         }
2116
2117         WARN_ON(!lreq->is_watch);
2118         dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2119              __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2120              lwork->notify.payload_len);
2121         lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2122                   lwork->notify.notifier_id, lwork->notify.payload,
2123                   lwork->notify.payload_len);
2124
2125 out:
2126         ceph_msg_put(lwork->notify.msg);
2127         lwork_free(lwork);
2128 }
2129
2130 static void do_watch_error(struct work_struct *w)
2131 {
2132         struct linger_work *lwork = container_of(w, struct linger_work, work);
2133         struct ceph_osd_linger_request *lreq = lwork->lreq;
2134
2135         if (!linger_registered(lreq)) {
2136                 dout("%s lreq %p not registered\n", __func__, lreq);
2137                 goto out;
2138         }
2139
2140         dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2141         lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2142
2143 out:
2144         lwork_free(lwork);
2145 }
2146
2147 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2148 {
2149         struct linger_work *lwork;
2150
2151         lwork = lwork_alloc(lreq, do_watch_error);
2152         if (!lwork) {
2153                 pr_err("failed to allocate error-lwork\n");
2154                 return;
2155         }
2156
2157         lwork->error.err = lreq->last_error;
2158         lwork_queue(lwork);
2159 }
2160
2161 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2162                                        int result)
2163 {
2164         if (!completion_done(&lreq->reg_commit_wait)) {
2165                 lreq->reg_commit_error = (result <= 0 ? result : 0);
2166                 complete_all(&lreq->reg_commit_wait);
2167         }
2168 }
2169
2170 static void linger_commit_cb(struct ceph_osd_request *req)
2171 {
2172         struct ceph_osd_linger_request *lreq = req->r_priv;
2173
2174         mutex_lock(&lreq->lock);
2175         dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
2176              lreq->linger_id, req->r_result);
2177         WARN_ON(!__linger_registered(lreq));
2178         linger_reg_commit_complete(lreq, req->r_result);
2179         lreq->committed = true;
2180
2181         if (!lreq->is_watch) {
2182                 struct ceph_osd_data *osd_data =
2183                     osd_req_op_data(req, 0, notify, response_data);
2184                 void *p = page_address(osd_data->pages[0]);
2185
2186                 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
2187                         osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
2188
2189                 /* make note of the notify_id */
2190                 if (req->r_ops[0].outdata_len >= sizeof(u64)) {
2191                         lreq->notify_id = ceph_decode_64(&p);
2192                         dout("lreq %p notify_id %llu\n", lreq,
2193                              lreq->notify_id);
2194                 } else {
2195                         dout("lreq %p no notify_id\n", lreq);
2196                 }
2197         }
2198
2199         mutex_unlock(&lreq->lock);
2200         linger_put(lreq);
2201 }
2202
2203 static int normalize_watch_error(int err)
2204 {
2205         /*
2206          * Translate ENOENT -> ENOTCONN so that a delete->disconnection
2207          * notification and a failure to reconnect because we raced with
2208          * the delete appear the same to the user.
2209          */
2210         if (err == -ENOENT)
2211                 err = -ENOTCONN;
2212
2213         return err;
2214 }
2215
2216 static void linger_reconnect_cb(struct ceph_osd_request *req)
2217 {
2218         struct ceph_osd_linger_request *lreq = req->r_priv;
2219
2220         mutex_lock(&lreq->lock);
2221         dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
2222              lreq, lreq->linger_id, req->r_result, lreq->last_error);
2223         if (req->r_result < 0) {
2224                 if (!lreq->last_error) {
2225                         lreq->last_error = normalize_watch_error(req->r_result);
2226                         queue_watch_error(lreq);
2227                 }
2228         }
2229
2230         mutex_unlock(&lreq->lock);
2231         linger_put(lreq);
2232 }
2233
2234 static void send_linger(struct ceph_osd_linger_request *lreq)
2235 {
2236         struct ceph_osd_request *req = lreq->reg_req;
2237         struct ceph_osd_req_op *op = &req->r_ops[0];
2238
2239         verify_osdc_wrlocked(req->r_osdc);
2240         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2241
2242         if (req->r_osd)
2243                 cancel_linger_request(req);
2244
2245         request_reinit(req);
2246         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
2247         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
2248         req->r_flags = lreq->t.flags;
2249         req->r_mtime = lreq->mtime;
2250
2251         mutex_lock(&lreq->lock);
2252         if (lreq->is_watch && lreq->committed) {
2253                 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2254                         op->watch.cookie != lreq->linger_id);
2255                 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
2256                 op->watch.gen = ++lreq->register_gen;
2257                 dout("lreq %p reconnect register_gen %u\n", lreq,
2258                      op->watch.gen);
2259                 req->r_callback = linger_reconnect_cb;
2260         } else {
2261                 if (!lreq->is_watch)
2262                         lreq->notify_id = 0;
2263                 else
2264                         WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
2265                 dout("lreq %p register\n", lreq);
2266                 req->r_callback = linger_commit_cb;
2267         }
2268         mutex_unlock(&lreq->lock);
2269
2270         req->r_priv = linger_get(lreq);
2271         req->r_linger = true;
2272
2273         submit_request(req, true);
2274 }
2275
2276 static void linger_ping_cb(struct ceph_osd_request *req)
2277 {
2278         struct ceph_osd_linger_request *lreq = req->r_priv;
2279
2280         mutex_lock(&lreq->lock);
2281         dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
2282              __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
2283              lreq->last_error);
2284         if (lreq->register_gen == req->r_ops[0].watch.gen) {
2285                 if (!req->r_result) {
2286                         lreq->watch_valid_thru = lreq->ping_sent;
2287                 } else if (!lreq->last_error) {
2288                         lreq->last_error = normalize_watch_error(req->r_result);
2289                         queue_watch_error(lreq);
2290                 }
2291         } else {
2292                 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
2293                      lreq->register_gen, req->r_ops[0].watch.gen);
2294         }
2295
2296         mutex_unlock(&lreq->lock);
2297         linger_put(lreq);
2298 }
2299
2300 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
2301 {
2302         struct ceph_osd_client *osdc = lreq->osdc;
2303         struct ceph_osd_request *req = lreq->ping_req;
2304         struct ceph_osd_req_op *op = &req->r_ops[0];
2305
2306         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2307                 dout("%s PAUSERD\n", __func__);
2308                 return;
2309         }
2310
2311         lreq->ping_sent = jiffies;
2312         dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
2313              __func__, lreq, lreq->linger_id, lreq->ping_sent,
2314              lreq->register_gen);
2315
2316         if (req->r_osd)
2317                 cancel_linger_request(req);
2318
2319         request_reinit(req);
2320         target_copy(&req->r_t, &lreq->t);
2321
2322         WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2323                 op->watch.cookie != lreq->linger_id ||
2324                 op->watch.op != CEPH_OSD_WATCH_OP_PING);
2325         op->watch.gen = lreq->register_gen;
2326         req->r_callback = linger_ping_cb;
2327         req->r_priv = linger_get(lreq);
2328         req->r_linger = true;
2329
2330         ceph_osdc_get_request(req);
2331         account_request(req);
2332         req->r_tid = atomic64_inc_return(&osdc->last_tid);
2333         link_request(lreq->osd, req);
2334         send_request(req);
2335 }
2336
2337 static void linger_submit(struct ceph_osd_linger_request *lreq)
2338 {
2339         struct ceph_osd_client *osdc = lreq->osdc;
2340         struct ceph_osd *osd;
2341
2342         calc_target(osdc, &lreq->t, &lreq->last_force_resend, false);
2343         osd = lookup_create_osd(osdc, lreq->t.osd, true);
2344         link_linger(osd, lreq);
2345
2346         send_linger(lreq);
2347 }
2348
2349 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
2350 {
2351         struct ceph_osd_client *osdc = lreq->osdc;
2352         struct ceph_osd_linger_request *lookup_lreq;
2353
2354         verify_osdc_wrlocked(osdc);
2355
2356         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
2357                                        lreq->linger_id);
2358         if (!lookup_lreq)
2359                 return;
2360
2361         WARN_ON(lookup_lreq != lreq);
2362         erase_linger_mc(&osdc->linger_map_checks, lreq);
2363         linger_put(lreq);
2364 }
2365
2366 /*
2367  * @lreq has to be both registered and linked.
2368  */
2369 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
2370 {
2371         if (lreq->is_watch && lreq->ping_req->r_osd)
2372                 cancel_linger_request(lreq->ping_req);
2373         if (lreq->reg_req->r_osd)
2374                 cancel_linger_request(lreq->reg_req);
2375         cancel_linger_map_check(lreq);
2376         unlink_linger(lreq->osd, lreq);
2377         linger_unregister(lreq);
2378 }
2379
2380 static void linger_cancel(struct ceph_osd_linger_request *lreq)
2381 {
2382         struct ceph_osd_client *osdc = lreq->osdc;
2383
2384         down_write(&osdc->lock);
2385         if (__linger_registered(lreq))
2386                 __linger_cancel(lreq);
2387         up_write(&osdc->lock);
2388 }
2389
2390 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
2391
2392 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
2393 {
2394         struct ceph_osd_client *osdc = lreq->osdc;
2395         struct ceph_osdmap *map = osdc->osdmap;
2396
2397         verify_osdc_wrlocked(osdc);
2398         WARN_ON(!map->epoch);
2399
2400         if (lreq->register_gen) {
2401                 lreq->map_dne_bound = map->epoch;
2402                 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
2403                      lreq, lreq->linger_id);
2404         } else {
2405                 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
2406                      __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
2407                      map->epoch);
2408         }
2409
2410         if (lreq->map_dne_bound) {
2411                 if (map->epoch >= lreq->map_dne_bound) {
2412                         /* we had a new enough map */
2413                         pr_info("linger_id %llu pool does not exist\n",
2414                                 lreq->linger_id);
2415                         linger_reg_commit_complete(lreq, -ENOENT);
2416                         __linger_cancel(lreq);
2417                 }
2418         } else {
2419                 send_linger_map_check(lreq);
2420         }
2421 }
2422
2423 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
2424 {
2425         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2426         struct ceph_osd_linger_request *lreq;
2427         u64 linger_id = greq->private_data;
2428
2429         WARN_ON(greq->result || !greq->u.newest);
2430
2431         down_write(&osdc->lock);
2432         lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
2433         if (!lreq) {
2434                 dout("%s linger_id %llu dne\n", __func__, linger_id);
2435                 goto out_unlock;
2436         }
2437
2438         dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
2439              __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
2440              greq->u.newest);
2441         if (!lreq->map_dne_bound)
2442                 lreq->map_dne_bound = greq->u.newest;
2443         erase_linger_mc(&osdc->linger_map_checks, lreq);
2444         check_linger_pool_dne(lreq);
2445
2446         linger_put(lreq);
2447 out_unlock:
2448         up_write(&osdc->lock);
2449 }
2450
2451 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
2452 {
2453         struct ceph_osd_client *osdc = lreq->osdc;
2454         struct ceph_osd_linger_request *lookup_lreq;
2455         int ret;
2456
2457         verify_osdc_wrlocked(osdc);
2458
2459         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
2460                                        lreq->linger_id);
2461         if (lookup_lreq) {
2462                 WARN_ON(lookup_lreq != lreq);
2463                 return;
2464         }
2465
2466         linger_get(lreq);
2467         insert_linger_mc(&osdc->linger_map_checks, lreq);
2468         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2469                                           linger_map_check_cb, lreq->linger_id);
2470         WARN_ON(ret);
2471 }
2472
2473 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
2474 {
2475         int ret;
2476
2477         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2478         ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
2479         return ret ?: lreq->reg_commit_error;
2480 }
2481
2482 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
2483 {
2484         int ret;
2485
2486         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2487         ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
2488         return ret ?: lreq->notify_finish_error;
2489 }
2490
2491 /*
2492  * Timeout callback, called every N seconds.  When one or more OSD
2493  * requests have been active for more than N seconds, we send a keepalive
2494  * (tag + timestamp) to the OSD in question to ensure any communications
2495  * channel reset is detected.
2496  */
2497 static void handle_timeout(struct work_struct *work)
2498 {
2499         struct ceph_osd_client *osdc =
2500                 container_of(work, struct ceph_osd_client, timeout_work.work);
2501         struct ceph_options *opts = osdc->client->options;
2502         unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
2503         LIST_HEAD(slow_osds);
2504         struct rb_node *n, *p;
2505
2506         dout("%s osdc %p\n", __func__, osdc);
2507         down_write(&osdc->lock);
2508
2509         /*
2510          * ping osds that are a bit slow.  this ensures that if there
2511          * is a break in the TCP connection we will notice, and reopen
2512          * a connection with that osd (from the fault callback).
2513          */
2514         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
2515                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
2516                 bool found = false;
2517
2518                 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
2519                         struct ceph_osd_request *req =
2520                             rb_entry(p, struct ceph_osd_request, r_node);
2521
2522                         if (time_before(req->r_stamp, cutoff)) {
2523                                 dout(" req %p tid %llu on osd%d is laggy\n",
2524                                      req, req->r_tid, osd->o_osd);
2525                                 found = true;
2526                         }
2527                 }
2528                 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
2529                         struct ceph_osd_linger_request *lreq =
2530                             rb_entry(p, struct ceph_osd_linger_request, node);
2531
2532                         dout(" lreq %p linger_id %llu is served by osd%d\n",
2533                              lreq, lreq->linger_id, osd->o_osd);
2534                         found = true;
2535
2536                         mutex_lock(&lreq->lock);
2537                         if (lreq->is_watch && lreq->committed && !lreq->last_error)
2538                                 send_linger_ping(lreq);
2539                         mutex_unlock(&lreq->lock);
2540                 }
2541
2542                 if (found)
2543                         list_move_tail(&osd->o_keepalive_item, &slow_osds);
2544         }
2545
2546         if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
2547                 maybe_request_map(osdc);
2548
2549         while (!list_empty(&slow_osds)) {
2550                 struct ceph_osd *osd = list_first_entry(&slow_osds,
2551                                                         struct ceph_osd,
2552                                                         o_keepalive_item);
2553                 list_del_init(&osd->o_keepalive_item);
2554                 ceph_con_keepalive(&osd->o_con);
2555         }
2556
2557         up_write(&osdc->lock);
2558         schedule_delayed_work(&osdc->timeout_work,
2559                               osdc->client->options->osd_keepalive_timeout);
2560 }
2561
2562 static void handle_osds_timeout(struct work_struct *work)
2563 {
2564         struct ceph_osd_client *osdc =
2565                 container_of(work, struct ceph_osd_client,
2566                              osds_timeout_work.work);
2567         unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
2568         struct ceph_osd *osd, *nosd;
2569
2570         dout("%s osdc %p\n", __func__, osdc);
2571         down_write(&osdc->lock);
2572         list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
2573                 if (time_before(jiffies, osd->lru_ttl))
2574                         break;
2575
2576                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
2577                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
2578                 close_osd(osd);
2579         }
2580
2581         up_write(&osdc->lock);
2582         schedule_delayed_work(&osdc->osds_timeout_work,
2583                               round_jiffies_relative(delay));
2584 }
2585
2586 static int ceph_oloc_decode(void **p, void *end,
2587                             struct ceph_object_locator *oloc)
2588 {
2589         u8 struct_v, struct_cv;
2590         u32 len;
2591         void *struct_end;
2592         int ret = 0;
2593
2594         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
2595         struct_v = ceph_decode_8(p);
2596         struct_cv = ceph_decode_8(p);
2597         if (struct_v < 3) {
2598                 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
2599                         struct_v, struct_cv);
2600                 goto e_inval;
2601         }
2602         if (struct_cv > 6) {
2603                 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
2604                         struct_v, struct_cv);
2605                 goto e_inval;
2606         }
2607         len = ceph_decode_32(p);
2608         ceph_decode_need(p, end, len, e_inval);
2609         struct_end = *p + len;
2610
2611         oloc->pool = ceph_decode_64(p);
2612         *p += 4; /* skip preferred */
2613
2614         len = ceph_decode_32(p);
2615         if (len > 0) {
2616                 pr_warn("ceph_object_locator::key is set\n");
2617                 goto e_inval;
2618         }
2619
2620         if (struct_v >= 5) {
2621                 bool changed = false;
2622
2623                 len = ceph_decode_32(p);
2624                 if (len > 0) {
2625                         ceph_decode_need(p, end, len, e_inval);
2626                         if (!oloc->pool_ns ||
2627                             ceph_compare_string(oloc->pool_ns, *p, len))
2628                                 changed = true;
2629                         *p += len;
2630                 } else {
2631                         if (oloc->pool_ns)
2632                                 changed = true;
2633                 }
2634                 if (changed) {
2635                         /* redirect changes namespace */
2636                         pr_warn("ceph_object_locator::nspace is changed\n");
2637                         goto e_inval;
2638                 }
2639         }
2640
2641         if (struct_v >= 6) {
2642                 s64 hash = ceph_decode_64(p);
2643                 if (hash != -1) {
2644                         pr_warn("ceph_object_locator::hash is set\n");
2645                         goto e_inval;
2646                 }
2647         }
2648
2649         /* skip the rest */
2650         *p = struct_end;
2651 out:
2652         return ret;
2653
2654 e_inval:
2655         ret = -EINVAL;
2656         goto out;
2657 }
2658
2659 static int ceph_redirect_decode(void **p, void *end,
2660                                 struct ceph_request_redirect *redir)
2661 {
2662         u8 struct_v, struct_cv;
2663         u32 len;
2664         void *struct_end;
2665         int ret;
2666
2667         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
2668         struct_v = ceph_decode_8(p);
2669         struct_cv = ceph_decode_8(p);
2670         if (struct_cv > 1) {
2671                 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
2672                         struct_v, struct_cv);
2673                 goto e_inval;
2674         }
2675         len = ceph_decode_32(p);
2676         ceph_decode_need(p, end, len, e_inval);
2677         struct_end = *p + len;
2678
2679         ret = ceph_oloc_decode(p, end, &redir->oloc);
2680         if (ret)
2681                 goto out;
2682
2683         len = ceph_decode_32(p);
2684         if (len > 0) {
2685                 pr_warn("ceph_request_redirect::object_name is set\n");
2686                 goto e_inval;
2687         }
2688
2689         len = ceph_decode_32(p);
2690         *p += len; /* skip osd_instructions */
2691
2692         /* skip the rest */
2693         *p = struct_end;
2694 out:
2695         return ret;
2696
2697 e_inval:
2698         ret = -EINVAL;
2699         goto out;
2700 }
2701
2702 struct MOSDOpReply {
2703         struct ceph_pg pgid;
2704         u64 flags;
2705         int result;
2706         u32 epoch;
2707         int num_ops;
2708         u32 outdata_len[CEPH_OSD_MAX_OPS];
2709         s32 rval[CEPH_OSD_MAX_OPS];
2710         int retry_attempt;
2711         struct ceph_eversion replay_version;
2712         u64 user_version;
2713         struct ceph_request_redirect redirect;
2714 };
2715
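/*
 * Decode an MOSDOpReply into @m.  Message versions 4 and earlier end
 * at the per-op rvals; v5 adds replay_version and user_version; v6
 * adds the redirect, which v7 makes conditional on a flag byte.
 */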
2716 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
2717 {
2718         void *p = msg->front.iov_base;
2719         void *const end = p + msg->front.iov_len;
2720         u16 version = le16_to_cpu(msg->hdr.version);
2721         struct ceph_eversion bad_replay_version;
2722         u8 decode_redir;
2723         u32 len;
2724         int ret;
2725         int i;
2726
2727         ceph_decode_32_safe(&p, end, len, e_inval);
2728         ceph_decode_need(&p, end, len, e_inval);
2729         p += len; /* skip oid */
2730
2731         ret = ceph_decode_pgid(&p, end, &m->pgid);
2732         if (ret)
2733                 return ret;
2734
2735         ceph_decode_64_safe(&p, end, m->flags, e_inval);
2736         ceph_decode_32_safe(&p, end, m->result, e_inval);
2737         ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
2738         memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
2739         p += sizeof(bad_replay_version);
2740         ceph_decode_32_safe(&p, end, m->epoch, e_inval);
2741
2742         ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
2743         if (m->num_ops > ARRAY_SIZE(m->outdata_len))
2744                 goto e_inval;
2745
2746         ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
2747                          e_inval);
2748         for (i = 0; i < m->num_ops; i++) {
2749                 struct ceph_osd_op *op = p;
2750
2751                 m->outdata_len[i] = le32_to_cpu(op->payload_len);
2752                 p += sizeof(*op);
2753         }
2754
2755         ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
2756         for (i = 0; i < m->num_ops; i++)
2757                 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
2758
2759         if (version >= 5) {
2760                 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
2761                 memcpy(&m->replay_version, p, sizeof(m->replay_version));
2762                 p += sizeof(m->replay_version);
2763                 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
2764         } else {
2765                 m->replay_version = bad_replay_version; /* struct */
2766                 m->user_version = le64_to_cpu(m->replay_version.version);
2767         }
2768
2769         if (version >= 6) {
2770                 if (version >= 7)
2771                         ceph_decode_8_safe(&p, end, decode_redir, e_inval);
2772                 else
2773                         decode_redir = 1;
2774         } else {
2775                 decode_redir = 0;
2776         }
2777
2778         if (decode_redir) {
2779                 ret = ceph_redirect_decode(&p, end, &m->redirect);
2780                 if (ret)
2781                         return ret;
2782         } else {
2783                 ceph_oloc_init(&m->redirect.oloc);
2784         }
2785
2786         return 0;
2787
2788 e_inval:
2789         return -EINVAL;
2790 }
2791
2792 /*
2793  * We are done with @req if
2794  *   - @m carries an error or is a safe reply, or
2795  *   - @m is an unsafe reply and we didn't want a safe one
2796  */
2797 static bool done_request(const struct ceph_osd_request *req,
2798                          const struct MOSDOpReply *m)
2799 {
2800         return (m->result < 0 ||
2801                 (m->flags & CEPH_OSD_FLAG_ONDISK) ||
2802                 !(req->r_flags & CEPH_OSD_FLAG_ONDISK));
2803 }
2804
2805 /*
2806  * handle osd op reply.  either call the callback if it is specified,
2807  * or do the completion to wake up the waiting thread.
2808  *
2809  * ->r_unsafe_callback is set?  yes                     no
2810  *
2811  * first reply is OK (needed    r_cb/r_completion,      r_cb/r_completion,
2812  * any or needed/got safe)      r_safe_completion       r_safe_completion
2813  *
2814  * first reply is unsafe        r_unsafe_cb(true)       (nothing)
2815  *
2816  * when we get the safe reply   r_unsafe_cb(false),     r_cb/r_completion,
2817  *                              r_safe_completion       r_safe_completion
2818  */
2819 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
2820 {
2821         struct ceph_osd_client *osdc = osd->o_osdc;
2822         struct ceph_osd_request *req;
2823         struct MOSDOpReply m;
2824         u64 tid = le64_to_cpu(msg->hdr.tid);
2825         u32 data_len = 0;
2826         bool already_acked;
2827         int ret;
2828         int i;
2829
2830         dout("%s msg %p tid %llu\n", __func__, msg, tid);
2831
2832         down_read(&osdc->lock);
2833         if (!osd_registered(osd)) {
2834                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
2835                 goto out_unlock_osdc;
2836         }
2837         WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
2838
2839         mutex_lock(&osd->lock);
2840         req = lookup_request(&osd->o_requests, tid);
2841         if (!req) {
2842                 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
2843                 goto out_unlock_session;
2844         }
2845
2846         m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
2847         ret = decode_MOSDOpReply(msg, &m);
2848         m.redirect.oloc.pool_ns = NULL;
2849         if (ret) {
2850                 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
2851                        req->r_tid, ret);
2852                 ceph_msg_dump(msg);
2853                 goto fail_request;
2854         }
2855         dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
2856              __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
2857              m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
2858              le64_to_cpu(m.replay_version.version), m.user_version);
2859
2860         if (m.retry_attempt >= 0) {
2861                 if (m.retry_attempt != req->r_attempts - 1) {
2862                         dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
2863                              req, req->r_tid, m.retry_attempt,
2864                              req->r_attempts - 1);
2865                         goto out_unlock_session;
2866                 }
2867         } else {
2868                 WARN_ON(1); /* MOSDOpReply v4 is assumed */
2869         }
2870
2871         if (!ceph_oloc_empty(&m.redirect.oloc)) {
2872                 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
2873                      m.redirect.oloc.pool);
2874                 unlink_request(osd, req);
2875                 mutex_unlock(&osd->lock);
2876
2877                 /*
2878                  * Not ceph_oloc_copy() - changing pool_ns is not
2879                  * supported.
2880                  */
2881                 req->r_t.target_oloc.pool = m.redirect.oloc.pool;
2882                 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED |
2883                                 CEPH_OSD_FLAG_IGNORE_OVERLAY |
2884                                 CEPH_OSD_FLAG_IGNORE_CACHE;
2885                 req->r_tid = 0;
2886                 __submit_request(req, false);
2887                 goto out_unlock_osdc;
2888         }
2889
2890         if (m.num_ops != req->r_num_ops) {
2891                 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
2892                        req->r_num_ops, req->r_tid);
2893                 goto fail_request;
2894         }
2895         for (i = 0; i < req->r_num_ops; i++) {
2896                 dout(" req %p tid %llu op %d rval %d len %u\n", req,
2897                      req->r_tid, i, m.rval[i], m.outdata_len[i]);
2898                 req->r_ops[i].rval = m.rval[i];
2899                 req->r_ops[i].outdata_len = m.outdata_len[i];
2900                 data_len += m.outdata_len[i];
2901         }
2902         if (data_len != le32_to_cpu(msg->hdr.data_len)) {
2903                 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
2904                        le32_to_cpu(msg->hdr.data_len), req->r_tid);
2905                 goto fail_request;
2906         }
2907         dout("%s req %p tid %llu acked %d result %d data_len %u\n", __func__,
2908              req, req->r_tid, req->r_got_reply, m.result, data_len);
2909
2910         already_acked = req->r_got_reply;
2911         if (!already_acked) {
2912                 req->r_result = m.result ?: data_len;
2913                 req->r_replay_version = m.replay_version; /* struct */
2914                 req->r_got_reply = true;
2915         } else if (!(m.flags & CEPH_OSD_FLAG_ONDISK)) {
2916                 dout("req %p tid %llu dup ack\n", req, req->r_tid);
2917                 goto out_unlock_session;
2918         }
2919
2920         if (done_request(req, &m)) {
2921                 __finish_request(req);
2922                 if (req->r_linger) {
2923                         WARN_ON(req->r_unsafe_callback);
2924                         dout("req %p tid %llu cb (locked)\n", req, req->r_tid);
2925                         __complete_request(req);
2926                 }
2927         }
2928
2929         mutex_unlock(&osd->lock);
2930         up_read(&osdc->lock);
2931
2932         if (done_request(req, &m)) {
2933                 if (already_acked && req->r_unsafe_callback) {
2934                         dout("req %p tid %llu safe-cb\n", req, req->r_tid);
2935                         req->r_unsafe_callback(req, false);
2936                 } else if (!req->r_linger) {
2937                         dout("req %p tid %llu cb\n", req, req->r_tid);
2938                         __complete_request(req);
2939                 }
2940                 if (m.flags & CEPH_OSD_FLAG_ONDISK)
2941                         complete_all(&req->r_safe_completion);
2942                 ceph_osdc_put_request(req);
2943         } else {
2944                 if (req->r_unsafe_callback) {
2945                         dout("req %p tid %llu unsafe-cb\n", req, req->r_tid);
2946                         req->r_unsafe_callback(req, true);
2947                 } else {
2948                         WARN_ON(1);
2949                 }
2950         }
2951
2952         return;
2953
2954 fail_request:
2955         complete_request(req, -EIO);
2956 out_unlock_session:
2957         mutex_unlock(&osd->lock);
2958 out_unlock_osdc:
2959         up_read(&osdc->lock);
2960 }
2961
2962 static void set_pool_was_full(struct ceph_osd_client *osdc)
2963 {
2964         struct rb_node *n;
2965
2966         for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
2967                 struct ceph_pg_pool_info *pi =
2968                     rb_entry(n, struct ceph_pg_pool_info, node);
2969
2970                 pi->was_full = __pool_full(pi);
2971         }
2972 }
2973
2974 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
2975 {
2976         struct ceph_pg_pool_info *pi;
2977
2978         pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
2979         if (!pi)
2980                 return false;
2981
2982         return pi->was_full && !__pool_full(pi);
2983 }
2984
2985 static enum calc_target_result
2986 recalc_linger_target(struct ceph_osd_linger_request *lreq)
2987 {
2988         struct ceph_osd_client *osdc = lreq->osdc;
2989         enum calc_target_result ct_res;
2990
2991         ct_res = calc_target(osdc, &lreq->t, &lreq->last_force_resend, true);
2992         if (ct_res == CALC_TARGET_NEED_RESEND) {
2993                 struct ceph_osd *osd;
2994
2995                 osd = lookup_create_osd(osdc, lreq->t.osd, true);
2996                 if (osd != lreq->osd) {
2997                         unlink_linger(lreq->osd, lreq);
2998                         link_linger(osd, lreq);
2999                 }
3000         }
3001
3002         return ct_res;
3003 }
3004
3005 /*
3006  * Requeue requests whose mapping to an OSD has changed.
3007  */
3008 static void scan_requests(struct ceph_osd *osd,
3009                           bool force_resend,
3010                           bool cleared_full,
3011                           bool check_pool_cleared_full,
3012                           struct rb_root *need_resend,
3013                           struct list_head *need_resend_linger)
3014 {
3015         struct ceph_osd_client *osdc = osd->o_osdc;
3016         struct rb_node *n;
3017         bool force_resend_writes;
3018
3019         for (n = rb_first(&osd->o_linger_requests); n; ) {
3020                 struct ceph_osd_linger_request *lreq =
3021                     rb_entry(n, struct ceph_osd_linger_request, node);
3022                 enum calc_target_result ct_res;
3023
3024                 n = rb_next(n); /* recalc_linger_target() */
3025
3026                 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3027                      lreq->linger_id);
3028                 ct_res = recalc_linger_target(lreq);
3029                 switch (ct_res) {
3030                 case CALC_TARGET_NO_ACTION:
3031                         force_resend_writes = cleared_full ||
3032                             (check_pool_cleared_full &&
3033                              pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3034                         if (!force_resend && !force_resend_writes)
3035                                 break;
3036
3037                         /* fall through */
3038                 case CALC_TARGET_NEED_RESEND:
3039                         cancel_linger_map_check(lreq);
3040                         /*
3041                          * scan_requests() for the previous epoch(s)
3042                          * may have already added it to the list, since
3043                          * it's not unlinked here.
3044                          */
3045                         if (list_empty(&lreq->scan_item))
3046                                 list_add_tail(&lreq->scan_item, need_resend_linger);
3047                         break;
3048                 case CALC_TARGET_POOL_DNE:
3049                         check_linger_pool_dne(lreq);
3050                         break;
3051                 }
3052         }
3053
3054         for (n = rb_first(&osd->o_requests); n; ) {
3055                 struct ceph_osd_request *req =
3056                     rb_entry(n, struct ceph_osd_request, r_node);
3057                 enum calc_target_result ct_res;
3058
3059                 n = rb_next(n); /* unlink_request(), check_pool_dne() */
3060
3061                 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3062                 ct_res = calc_target(osdc, &req->r_t,
3063                                      &req->r_last_force_resend, false);
3064                 switch (ct_res) {
3065                 case CALC_TARGET_NO_ACTION:
3066                         force_resend_writes = cleared_full ||
3067                             (check_pool_cleared_full &&
3068                              pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3069                         if (!force_resend &&
3070                             (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3071                              !force_resend_writes))
3072                                 break;
3073
3074                         /* fall through */
3075                 case CALC_TARGET_NEED_RESEND:
3076                         cancel_map_check(req);
3077                         unlink_request(osd, req);
3078                         insert_request(need_resend, req);
3079                         break;
3080                 case CALC_TARGET_POOL_DNE:
3081                         check_pool_dne(req);
3082                         break;
3083                 }
3084         }
3085 }
3086
3087 static int handle_one_map(struct ceph_osd_client *osdc,
3088                           void *p, void *end, bool incremental,
3089                           struct rb_root *need_resend,
3090                           struct list_head *need_resend_linger)
3091 {
3092         struct ceph_osdmap *newmap;
3093         struct rb_node *n;
3094         bool skipped_map = false;
3095         bool was_full;
3096
3097         was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3098         set_pool_was_full(osdc);
3099
3100         if (incremental)
3101                 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3102         else
3103                 newmap = ceph_osdmap_decode(&p, end);
3104         if (IS_ERR(newmap))
3105                 return PTR_ERR(newmap);
3106
3107         if (newmap != osdc->osdmap) {
3108                 /*
3109                  * Preserve ->was_full before destroying the old map.
3110                  * For pools that weren't in the old map, ->was_full
3111                  * should be false.
3112                  */
3113                 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3114                         struct ceph_pg_pool_info *pi =
3115                             rb_entry(n, struct ceph_pg_pool_info, node);
3116                         struct ceph_pg_pool_info *old_pi;
3117
3118                         old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3119                         if (old_pi)
3120                                 pi->was_full = old_pi->was_full;
3121                         else
3122                                 WARN_ON(pi->was_full);
3123                 }
3124
3125                 if (osdc->osdmap->epoch &&
3126                     osdc->osdmap->epoch + 1 < newmap->epoch) {
3127                         WARN_ON(incremental);
3128                         skipped_map = true;
3129                 }
3130
3131                 ceph_osdmap_destroy(osdc->osdmap);
3132                 osdc->osdmap = newmap;
3133         }
3134
3135         was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3136         scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3137                       need_resend, need_resend_linger);
3138
3139         for (n = rb_first(&osdc->osds); n; ) {
3140                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3141
3142                 n = rb_next(n); /* close_osd() */
3143
3144                 scan_requests(osd, skipped_map, was_full, true, need_resend,
3145                               need_resend_linger);
3146                 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3147                     memcmp(&osd->o_con.peer_addr,
3148                            ceph_osd_addr(osdc->osdmap, osd->o_osd),
3149                            sizeof(struct ceph_entity_addr)))
3150                         close_osd(osd);
3151         }
3152
3153         return 0;
3154 }
3155
3156 static void kick_requests(struct ceph_osd_client *osdc,
3157                           struct rb_root *need_resend,
3158                           struct list_head *need_resend_linger)
3159 {
3160         struct ceph_osd_linger_request *lreq, *nlreq;
3161         struct rb_node *n;
3162
3163         for (n = rb_first(need_resend); n; ) {
3164                 struct ceph_osd_request *req =
3165                     rb_entry(n, struct ceph_osd_request, r_node);
3166                 struct ceph_osd *osd;
3167
3168                 n = rb_next(n);
3169                 erase_request(need_resend, req); /* before link_request() */
3170
3171                 WARN_ON(req->r_osd);
3172                 calc_target(osdc, &req->r_t, NULL, false);
3173                 osd = lookup_create_osd(osdc, req->r_t.osd, true);
3174                 link_request(osd, req);
3175                 if (!req->r_linger) {
3176                         if (!osd_homeless(osd) && !req->r_t.paused)
3177                                 send_request(req);
3178                 } else {
3179                         cancel_linger_request(req);
3180                 }
3181         }
3182
3183         list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
3184                 if (!osd_homeless(lreq->osd))
3185                         send_linger(lreq);
3186
3187                 list_del_init(&lreq->scan_item);
3188         }
3189 }
3190
3191 /*
3192  * Process updated osd map.
3193  *
3194  * The message contains any number of incremental and full maps, normally
3195  * indicating some sort of topology change in the cluster.  Kick requests
3196  * off to different OSDs as needed.
3197  */
3198 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3199 {
3200         void *p = msg->front.iov_base;
3201         void *const end = p + msg->front.iov_len;
3202         u32 nr_maps, maplen;
3203         u32 epoch;
3204         struct ceph_fsid fsid;
3205         struct rb_root need_resend = RB_ROOT;
3206         LIST_HEAD(need_resend_linger);
3207         bool handled_incremental = false;
3208         bool was_pauserd, was_pausewr;
3209         bool pauserd, pausewr;
3210         int err;
3211
3212         dout("%s have %u\n", __func__, osdc->osdmap->epoch);
3213         down_write(&osdc->lock);
3214
3215         /* verify fsid */
3216         ceph_decode_need(&p, end, sizeof(fsid), bad);
3217         ceph_decode_copy(&p, &fsid, sizeof(fsid));
3218         if (ceph_check_fsid(osdc->client, &fsid) < 0)
3219                 goto bad;
3220
3221         was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3222         was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3223                       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3224                       have_pool_full(osdc);
3225
3226         /* incremental maps */
3227         ceph_decode_32_safe(&p, end, nr_maps, bad);
3228         dout(" %d inc maps\n", nr_maps);
3229         while (nr_maps > 0) {
3230                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3231                 epoch = ceph_decode_32(&p);
3232                 maplen = ceph_decode_32(&p);
3233                 ceph_decode_need(&p, end, maplen, bad);
3234                 if (osdc->osdmap->epoch &&
3235                     osdc->osdmap->epoch + 1 == epoch) {
3236                         dout("applying incremental map %u len %d\n",
3237                              epoch, maplen);
3238                         err = handle_one_map(osdc, p, p + maplen, true,
3239                                              &need_resend, &need_resend_linger);
3240                         if (err)
3241                                 goto bad;
3242                         handled_incremental = true;
3243                 } else {
3244                         dout("ignoring incremental map %u len %d\n",
3245                              epoch, maplen);
3246                 }
3247                 p += maplen;
3248                 nr_maps--;
3249         }
3250         if (handled_incremental)
3251                 goto done;
3252
3253         /* full maps */
3254         ceph_decode_32_safe(&p, end, nr_maps, bad);
3255         dout(" %d full maps\n", nr_maps);
3256         while (nr_maps) {
3257                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3258                 epoch = ceph_decode_32(&p);
3259                 maplen = ceph_decode_32(&p);
3260                 ceph_decode_need(&p, end, maplen, bad);
3261                 if (nr_maps > 1) {
3262                         dout("skipping non-latest full map %u len %d\n",
3263                              epoch, maplen);
3264                 } else if (osdc->osdmap->epoch >= epoch) {
3265                         dout("skipping full map %u len %d, "
3266                              "older than our %u\n", epoch, maplen,
3267                              osdc->osdmap->epoch);
3268                 } else {
3269                         dout("taking full map %u len %d\n", epoch, maplen);
3270                         err = handle_one_map(osdc, p, p + maplen, false,
3271                                              &need_resend, &need_resend_linger);
3272                         if (err)
3273                                 goto bad;
3274                 }
3275                 p += maplen;
3276                 nr_maps--;
3277         }
3278
3279 done:
3280         /*
3281          * subscribe to subsequent osdmap updates if full to ensure
3282          * we find out when we are no longer full and stop returning
3283          * ENOSPC.
3284          */
3285         pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3286         pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3287                   ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3288                   have_pool_full(osdc);
3289         if (was_pauserd || was_pausewr || pauserd || pausewr)
3290                 maybe_request_map(osdc);
3291
3292         kick_requests(osdc, &need_resend, &need_resend_linger);
3293
3294         ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
3295                           osdc->osdmap->epoch);
3296         up_write(&osdc->lock);
3297         wake_up_all(&osdc->client->auth_wq);
3298         return;
3299
3300 bad:
3301         pr_err("osdc handle_map corrupt msg\n");
3302         ceph_msg_dump(msg);
3303         up_write(&osdc->lock);
3304 }
3305
3306 /*
3307  * Resubmit requests pending on the given osd.
3308  */
3309 static void kick_osd_requests(struct ceph_osd *osd)
3310 {
3311         struct rb_node *n;
3312
3313         for (n = rb_first(&osd->o_requests); n; ) {
3314                 struct ceph_osd_request *req =
3315                     rb_entry(n, struct ceph_osd_request, r_node);
3316
3317                 n = rb_next(n); /* cancel_linger_request() */
3318
3319                 if (!req->r_linger) {
3320                         if (!req->r_t.paused)
3321                                 send_request(req);
3322                 } else {
3323                         cancel_linger_request(req);
3324                 }
3325         }
3326         for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
3327                 struct ceph_osd_linger_request *lreq =
3328                     rb_entry(n, struct ceph_osd_linger_request, node);
3329
3330                 send_linger(lreq);
3331         }
3332 }
3333
3334 /*
3335  * If the osd connection drops, we need to resubmit all requests.
3336  */
3337 static void osd_fault(struct ceph_connection *con)
3338 {
3339         struct ceph_osd *osd = con->private;
3340         struct ceph_osd_client *osdc = osd->o_osdc;
3341
3342         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
3343
3344         down_write(&osdc->lock);
3345         if (!osd_registered(osd)) {
3346                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3347                 goto out_unlock;
3348         }
3349
3350         if (!reopen_osd(osd))
3351                 kick_osd_requests(osd);
3352         maybe_request_map(osdc);
3353
3354 out_unlock:
3355         up_write(&osdc->lock);
3356 }
3357
3358 /*
3359  * Process osd watch notifications
3360  */
3361 static void handle_watch_notify(struct ceph_osd_client *osdc,
3362                                 struct ceph_msg *msg)
3363 {
3364         void *p = msg->front.iov_base;
3365         void *const end = p + msg->front.iov_len;
3366         struct ceph_osd_linger_request *lreq;
3367         struct linger_work *lwork;
3368         u8 proto_ver, opcode;
3369         u64 cookie, notify_id;
3370         u64 notifier_id = 0;
3371         s32 return_code = 0;
3372         void *payload = NULL;
3373         u32 payload_len = 0;
3374
3375         ceph_decode_8_safe(&p, end, proto_ver, bad);
3376         ceph_decode_8_safe(&p, end, opcode, bad);
3377         ceph_decode_64_safe(&p, end, cookie, bad);
3378         p += 8; /* skip ver */
3379         ceph_decode_64_safe(&p, end, notify_id, bad);
3380
3381         if (proto_ver >= 1) {
3382                 ceph_decode_32_safe(&p, end, payload_len, bad);
3383                 ceph_decode_need(&p, end, payload_len, bad);
3384                 payload = p;
3385                 p += payload_len;
3386         }
3387
3388         if (le16_to_cpu(msg->hdr.version) >= 2)
3389                 ceph_decode_32_safe(&p, end, return_code, bad);
3390
3391         if (le16_to_cpu(msg->hdr.version) >= 3)
3392                 ceph_decode_64_safe(&p, end, notifier_id, bad);
3393
3394         down_read(&osdc->lock);
3395         lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
3396         if (!lreq) {
3397                 dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
3398                      cookie);
3399                 goto out_unlock_osdc;
3400         }
3401
3402         mutex_lock(&lreq->lock);
3403         dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
3404              opcode, cookie, lreq, lreq->is_watch);
3405         if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
3406                 if (!lreq->last_error) {
3407                         lreq->last_error = -ENOTCONN;
3408                         queue_watch_error(lreq);
3409                 }
3410         } else if (!lreq->is_watch) {
3411                 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
3412                 if (lreq->notify_id && lreq->notify_id != notify_id) {
3413                         dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
3414                              lreq->notify_id, notify_id);
3415                 } else if (!completion_done(&lreq->notify_finish_wait)) {
3416                         struct ceph_msg_data *data =
3417                             list_first_entry_or_null(&msg->data,
3418                                                      struct ceph_msg_data,
3419                                                      links);
3420
3421                         if (data) {
3422                                 if (lreq->preply_pages) {
3423                                         WARN_ON(data->type !=
3424                                                         CEPH_MSG_DATA_PAGES);
3425                                         *lreq->preply_pages = data->pages;
3426                                         *lreq->preply_len = data->length;
3427                                 } else {
3428                                         ceph_release_page_vector(data->pages,
3429                                                calc_pages_for(0, data->length));
3430                                 }
3431                         }
3432                         lreq->notify_finish_error = return_code;
3433                         complete_all(&lreq->notify_finish_wait);
3434                 }
3435         } else {
3436                 /* CEPH_WATCH_EVENT_NOTIFY */
3437                 lwork = lwork_alloc(lreq, do_watch_notify);
3438                 if (!lwork) {
3439                         pr_err("failed to allocate notify-lwork\n");
3440                         goto out_unlock_lreq;
3441                 }
3442
3443                 lwork->notify.notify_id = notify_id;
3444                 lwork->notify.notifier_id = notifier_id;
3445                 lwork->notify.payload = payload;
3446                 lwork->notify.payload_len = payload_len;
3447                 lwork->notify.msg = ceph_msg_get(msg);
3448                 lwork_queue(lwork);
3449         }
3450
3451 out_unlock_lreq:
3452         mutex_unlock(&lreq->lock);
3453 out_unlock_osdc:
3454         up_read(&osdc->lock);
3455         return;
3456
3457 bad:
3458         pr_err("osdc handle_watch_notify corrupt msg\n");
3459 }
3460
3461 /*
3462  * Register request, send initial attempt.  @nofail is currently ignored.
3463  */
3464 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
3465                             struct ceph_osd_request *req,
3466                             bool nofail)
3467 {
3468         down_read(&osdc->lock);
3469         submit_request(req, false);
3470         up_read(&osdc->lock);
3471
3472         return 0;
3473 }
3474 EXPORT_SYMBOL(ceph_osdc_start_request);
3475
3476 /*
3477  * Unregister a registered request.  The request is not completed (i.e.
3478  * no callbacks or wakeups) - higher layers are supposed to know what
3479  * they are canceling.
3480  */
3481 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
3482 {
3483         struct ceph_osd_client *osdc = req->r_osdc;
3484
3485         down_write(&osdc->lock);
3486         if (req->r_osd)
3487                 cancel_request(req);
3488         up_write(&osdc->lock);
3489 }
3490 EXPORT_SYMBOL(ceph_osdc_cancel_request);
3491
3492 /*
3493  * @timeout: in jiffies, 0 means "wait forever"
3494  */
3495 static int wait_request_timeout(struct ceph_osd_request *req,
3496                                 unsigned long timeout)
3497 {
3498         long left;
3499
3500         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3501         left = wait_for_completion_killable_timeout(&req->r_completion,
3502                                                 ceph_timeout_jiffies(timeout));
3503         if (left <= 0) {
3504                 left = left ?: -ETIMEDOUT;
3505                 ceph_osdc_cancel_request(req);
3506
3507                 /* kludge - need to wake ceph_osdc_sync() */
3508                 complete_all(&req->r_safe_completion);
3509         } else {
3510                 left = req->r_result; /* completed */
3511         }
3512
3513         return left;
3514 }
3515
3516 /*
3517  * wait for a request to complete
3518  */
3519 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
3520                            struct ceph_osd_request *req)
3521 {
3522         return wait_request_timeout(req, 0);
3523 }
3524 EXPORT_SYMBOL(ceph_osdc_wait_request);
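/*
 * Illustrative sketch, not part of this file: the usual lifecycle of a
 * one-shot request, mirroring ceph_osdc_readpages() below.  "osdc",
 * "layout", "vino", "pages", etc. stand for caller-provided state and
 * error handling is abbreviated.
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, truncate_seq, truncate_size, false);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
 *
 *	ret = ceph_osdc_start_request(osdc, req, false);
 *	if (!ret)
 *		ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 *
 * ceph_osdc_wait_request() returns req->r_result on completion; the
 * final put drops the caller's reference regardless of the outcome.
 */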
3525
3526 /*
3527  * sync - wait for all in-flight write requests to flush.  Avoid starvation.
3528  */
3529 void ceph_osdc_sync(struct ceph_osd_client *osdc)
3530 {
3531         struct rb_node *n, *p;
3532         u64 last_tid = atomic64_read(&osdc->last_tid);
3533
3534 again:
3535         down_read(&osdc->lock);
3536         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3537                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3538
3539                 mutex_lock(&osd->lock);
3540                 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
3541                         struct ceph_osd_request *req =
3542                             rb_entry(p, struct ceph_osd_request, r_node);
3543
3544                         if (req->r_tid > last_tid)
3545                                 break;
3546
3547                         if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
3548                                 continue;
3549
3550                         ceph_osdc_get_request(req);
3551                         mutex_unlock(&osd->lock);
3552                         up_read(&osdc->lock);
3553                         dout("%s waiting on req %p tid %llu last_tid %llu\n",
3554                              __func__, req, req->r_tid, last_tid);
3555                         wait_for_completion(&req->r_safe_completion);
3556                         ceph_osdc_put_request(req);
3557                         goto again;
3558                 }
3559
3560                 mutex_unlock(&osd->lock);
3561         }
3562
3563         up_read(&osdc->lock);
3564         dout("%s done last_tid %llu\n", __func__, last_tid);
3565 }
3566 EXPORT_SYMBOL(ceph_osdc_sync);
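/*
 * Illustrative sketch, not part of this file: a filesystem sync path
 * can drain its in-flight writes with just
 *
 *	ceph_osdc_sync(osdc);
 *
 * Requests submitted after the call begins (tid above the initial
 * last_tid) are deliberately not waited for, which is what prevents
 * starvation.
 */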
3567
3568 static struct ceph_osd_request *
3569 alloc_linger_request(struct ceph_osd_linger_request *lreq)
3570 {
3571         struct ceph_osd_request *req;
3572
3573         req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
3574         if (!req)
3575                 return NULL;
3576
3577         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3578         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3579
3580         if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
3581                 ceph_osdc_put_request(req);
3582                 return NULL;
3583         }
3584
3585         return req;
3586 }
3587
3588 /*
3589  * Returns a handle, caller owns a ref.
3590  */
3591 struct ceph_osd_linger_request *
3592 ceph_osdc_watch(struct ceph_osd_client *osdc,
3593                 struct ceph_object_id *oid,
3594                 struct ceph_object_locator *oloc,
3595                 rados_watchcb2_t wcb,
3596                 rados_watcherrcb_t errcb,
3597                 void *data)
3598 {
3599         struct ceph_osd_linger_request *lreq;
3600         int ret;
3601
3602         lreq = linger_alloc(osdc);
3603         if (!lreq)
3604                 return ERR_PTR(-ENOMEM);
3605
3606         lreq->is_watch = true;
3607         lreq->wcb = wcb;
3608         lreq->errcb = errcb;
3609         lreq->data = data;
3610         lreq->watch_valid_thru = jiffies;
3611
3612         ceph_oid_copy(&lreq->t.base_oid, oid);
3613         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
3614         lreq->t.flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
3615         lreq->mtime = CURRENT_TIME;
3616
3617         lreq->reg_req = alloc_linger_request(lreq);
3618         if (!lreq->reg_req) {
3619                 ret = -ENOMEM;
3620                 goto err_put_lreq;
3621         }
3622
3623         lreq->ping_req = alloc_linger_request(lreq);
3624         if (!lreq->ping_req) {
3625                 ret = -ENOMEM;
3626                 goto err_put_lreq;
3627         }
3628
3629         down_write(&osdc->lock);
3630         linger_register(lreq); /* before osd_req_op_* */
3631         osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
3632                               CEPH_OSD_WATCH_OP_WATCH);
3633         osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
3634                               CEPH_OSD_WATCH_OP_PING);
3635         linger_submit(lreq);
3636         up_write(&osdc->lock);
3637
3638         ret = linger_reg_commit_wait(lreq);
3639         if (ret) {
3640                 linger_cancel(lreq);
3641                 goto err_put_lreq;
3642         }
3643
3644         return lreq;
3645
3646 err_put_lreq:
3647         linger_put(lreq);
3648         return ERR_PTR(ret);
3649 }
3650 EXPORT_SYMBOL(ceph_osdc_watch);
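/*
 * Illustrative sketch, not part of this file: registering a watch and
 * tearing it down.  my_watch_cb(), my_err_cb() and "arg" are
 * hypothetical caller-side names.
 *
 *	static void my_watch_cb(void *arg, u64 notify_id, u64 cookie,
 *				u64 notifier_id, void *data, size_t data_len)
 *	{
 *		...handle the event, then acknowledge it with
 *		   ceph_osdc_notify_ack(), see below...
 *	}
 *
 *	handle = ceph_osdc_watch(osdc, oid, oloc, my_watch_cb, my_err_cb, arg);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ceph_osdc_unwatch(osdc, handle);
 *
 * ceph_osdc_unwatch() releases the handle's ref, so the handle must
 * not be used afterwards.
 */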
3651
3652 /*
3653  * Releases a ref.
3654  *
3655  * Times out after mount_timeout to preserve rbd unmap behaviour
3656  * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
3657  * with mount_timeout").
3658  */
3659 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
3660                       struct ceph_osd_linger_request *lreq)
3661 {
3662         struct ceph_options *opts = osdc->client->options;
3663         struct ceph_osd_request *req;
3664         int ret;
3665
3666         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
3667         if (!req)
3668                 return -ENOMEM;
3669
3670         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3671         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3672         req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
3673         req->r_mtime = CURRENT_TIME;
3674         osd_req_op_watch_init(req, 0, lreq->linger_id,
3675                               CEPH_OSD_WATCH_OP_UNWATCH);
3676
3677         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3678         if (ret)
3679                 goto out_put_req;
3680
3681         ceph_osdc_start_request(osdc, req, false);
3682         linger_cancel(lreq);
3683         linger_put(lreq);
3684         ret = wait_request_timeout(req, opts->mount_timeout);
3685
3686 out_put_req:
3687         ceph_osdc_put_request(req);
3688         return ret;
3689 }
3690 EXPORT_SYMBOL(ceph_osdc_unwatch);
3691
3692 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
3693                                       u64 notify_id, u64 cookie, void *payload,
3694                                       size_t payload_len)
3695 {
3696         struct ceph_osd_req_op *op;
3697         struct ceph_pagelist *pl;
3698         int ret;
3699
3700         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
3701
3702         pl = kmalloc(sizeof(*pl), GFP_NOIO);
3703         if (!pl)
3704                 return -ENOMEM;
3705
3706         ceph_pagelist_init(pl);
3707         ret = ceph_pagelist_encode_64(pl, notify_id);
3708         ret |= ceph_pagelist_encode_64(pl, cookie);
3709         if (payload) {
3710                 ret |= ceph_pagelist_encode_32(pl, payload_len);
3711                 ret |= ceph_pagelist_append(pl, payload, payload_len);
3712         } else {
3713                 ret |= ceph_pagelist_encode_32(pl, 0);
3714         }
3715         if (ret) {
3716                 ceph_pagelist_release(pl);
3717                 return -ENOMEM;
3718         }
3719
3720         ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
3721         op->indata_len = pl->length;
3722         return 0;
3723 }
3724
3725 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
3726                          struct ceph_object_id *oid,
3727                          struct ceph_object_locator *oloc,
3728                          u64 notify_id,
3729                          u64 cookie,
3730                          void *payload,
3731                          size_t payload_len)
3732 {
3733         struct ceph_osd_request *req;
3734         int ret;
3735
3736         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
3737         if (!req)
3738                 return -ENOMEM;
3739
3740         ceph_oid_copy(&req->r_base_oid, oid);
3741         ceph_oloc_copy(&req->r_base_oloc, oloc);
3742         req->r_flags = CEPH_OSD_FLAG_READ;
3743
3744         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3745         if (ret)
3746                 goto out_put_req;
3747
3748         ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
3749                                          payload_len);
3750         if (ret)
3751                 goto out_put_req;
3752
3753         ceph_osdc_start_request(osdc, req, false);
3754         ret = ceph_osdc_wait_request(osdc, req);
3755
3756 out_put_req:
3757         ceph_osdc_put_request(req);
3758         return ret;
3759 }
3760 EXPORT_SYMBOL(ceph_osdc_notify_ack);
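/*
 * Illustrative sketch, not part of this file: a watch callback is
 * expected to acknowledge each notify so that the notifier's
 * ceph_osdc_notify() can complete before its timeout.  "my_dev" and
 * its fields are placeholders.
 *
 *	static void my_watch_cb(void *arg, u64 notify_id, u64 cookie,
 *				u64 notifier_id, void *data, size_t data_len)
 *	{
 *		struct my_dev *dev = arg;
 *
 *		ceph_osdc_notify_ack(dev->osdc, &dev->oid, &dev->oloc,
 *				     notify_id, cookie, NULL, 0);
 *	}
 */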
3761
3762 static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
3763                                   u64 cookie, u32 prot_ver, u32 timeout,
3764                                   void *payload, size_t payload_len)
3765 {
3766         struct ceph_osd_req_op *op;
3767         struct ceph_pagelist *pl;
3768         int ret;
3769
3770         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
3771         op->notify.cookie = cookie;
3772
3773         pl = kmalloc(sizeof(*pl), GFP_NOIO);
3774         if (!pl)
3775                 return -ENOMEM;
3776
3777         ceph_pagelist_init(pl);
3778         ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
3779         ret |= ceph_pagelist_encode_32(pl, timeout);
3780         ret |= ceph_pagelist_encode_32(pl, payload_len);
3781         ret |= ceph_pagelist_append(pl, payload, payload_len);
3782         if (ret) {
3783                 ceph_pagelist_release(pl);
3784                 return -ENOMEM;
3785         }
3786
3787         ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
3788         op->indata_len = pl->length;
3789         return 0;
3790 }
3791
3792 /*
3793  * @timeout: in seconds
3794  *
3795  * @preply_{pages,len} are initialized both on success and error.
3796  * The caller is responsible for:
3797  *
3798  *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
3799  */
3800 int ceph_osdc_notify(struct ceph_osd_client *osdc,
3801                      struct ceph_object_id *oid,
3802                      struct ceph_object_locator *oloc,
3803                      void *payload,
3804                      size_t payload_len,
3805                      u32 timeout,
3806                      struct page ***preply_pages,
3807                      size_t *preply_len)
3808 {
3809         struct ceph_osd_linger_request *lreq;
3810         struct page **pages;
3811         int ret;
3812
3813         WARN_ON(!timeout);
3814         if (preply_pages) {
3815                 *preply_pages = NULL;
3816                 *preply_len = 0;
3817         }
3818
3819         lreq = linger_alloc(osdc);
3820         if (!lreq)
3821                 return -ENOMEM;
3822
3823         lreq->preply_pages = preply_pages;
3824         lreq->preply_len = preply_len;
3825
3826         ceph_oid_copy(&lreq->t.base_oid, oid);
3827         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
3828         lreq->t.flags = CEPH_OSD_FLAG_READ;
3829
3830         lreq->reg_req = alloc_linger_request(lreq);
3831         if (!lreq->reg_req) {
3832                 ret = -ENOMEM;
3833                 goto out_put_lreq;
3834         }
3835
3836         /* for notify_id */
3837         pages = ceph_alloc_page_vector(1, GFP_NOIO);
3838         if (IS_ERR(pages)) {
3839                 ret = PTR_ERR(pages);
3840                 goto out_put_lreq;
3841         }
3842
3843         down_write(&osdc->lock);
3844         linger_register(lreq); /* before osd_req_op_* */
3845         ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
3846                                      timeout, payload, payload_len);
3847         if (ret) {
3848                 linger_unregister(lreq);
3849                 up_write(&osdc->lock);
3850                 ceph_release_page_vector(pages, 1);
3851                 goto out_put_lreq;
3852         }
3853         ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
3854                                                  response_data),
3855                                  pages, PAGE_SIZE, 0, false, true);
3856         linger_submit(lreq);
3857         up_write(&osdc->lock);
3858
3859         ret = linger_reg_commit_wait(lreq);
3860         if (!ret)
3861                 ret = linger_notify_finish_wait(lreq);
3862         else
3863                 dout("lreq %p failed to initiate notify %d\n", lreq, ret);
3864
3865         linger_cancel(lreq);
3866 out_put_lreq:
3867         linger_put(lreq);
3868         return ret;
3869 }
3870 EXPORT_SYMBOL(ceph_osdc_notify);
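/*
 * Illustrative sketch, not part of this file: sending a notify and
 * honouring the release contract above.  The 10-second timeout and
 * the payload are arbitrary placeholders.
 *
 *	struct page **reply_pages;
 *	size_t reply_len;
 *
 *	ret = ceph_osdc_notify(osdc, oid, oloc, payload, payload_len,
 *			       10, &reply_pages, &reply_len);
 *	...inspect the reply on success...
 *	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
 *
 * Because @preply_{pages,len} are initialized even on error, the
 * release is safe unconditionally.
 */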
3871
3872 /*
3873  * Return the number of milliseconds since the watch was last
3874  * confirmed, or an error.  If there is an error, the watch is no
3875  * longer valid, and should be destroyed with ceph_osdc_unwatch().
3876  */
3877 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
3878                           struct ceph_osd_linger_request *lreq)
3879 {
3880         unsigned long stamp, age;
3881         int ret;
3882
3883         down_read(&osdc->lock);
3884         mutex_lock(&lreq->lock);
3885         stamp = lreq->watch_valid_thru;
3886         if (!list_empty(&lreq->pending_lworks)) {
3887                 struct linger_work *lwork =
3888                     list_first_entry(&lreq->pending_lworks,
3889                                      struct linger_work,
3890                                      pending_item);
3891
3892                 if (time_before(lwork->queued_stamp, stamp))
3893                         stamp = lwork->queued_stamp;
3894         }
3895         age = jiffies - stamp;
3896         dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
3897              lreq, lreq->linger_id, age, lreq->last_error);
3898         /* we are truncating to msecs, so return a safe upper bound */
3899         ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
3900
3901         mutex_unlock(&lreq->lock);
3902         up_read(&osdc->lock);
3903         return ret;
3904 }
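/*
 * Illustrative sketch, not part of this file: a caller might poll
 * watch validity periodically, e.g. from a delayed work item:
 *
 *	ret = ceph_osdc_watch_check(osdc, handle);
 *	if (ret < 0)
 *		...the watch is broken - tear it down with
 *		   ceph_osdc_unwatch() and re-register...
 *	else
 *		...the watch was valid as of "ret" milliseconds ago...
 */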
3905
3906 static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
3907 {
3908         u8 struct_v;
3909         u32 struct_len;
3910         int ret;
3911
3912         ret = ceph_start_decoding(p, end, 2, "watch_item_t",
3913                                   &struct_v, &struct_len);
3914         if (ret)
3915                 return ret;
3916
3917         ceph_decode_copy(p, &item->name, sizeof(item->name));
3918         item->cookie = ceph_decode_64(p);
3919         *p += 4; /* skip timeout_seconds */
3920         if (struct_v >= 2) {
3921                 ceph_decode_copy(p, &item->addr, sizeof(item->addr));
3922                 ceph_decode_addr(&item->addr);
3923         }
3924
3925         dout("%s %s%llu cookie %llu addr %s\n", __func__,
3926              ENTITY_NAME(item->name), item->cookie,
3927              ceph_pr_addr(&item->addr.in_addr));
3928         return 0;
3929 }
3930
3931 static int decode_watchers(void **p, void *end,
3932                            struct ceph_watch_item **watchers,
3933                            u32 *num_watchers)
3934 {
3935         u8 struct_v;
3936         u32 struct_len;
3937         int i;
3938         int ret;
3939
3940         ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
3941                                   &struct_v, &struct_len);
3942         if (ret)
3943                 return ret;
3944
3945         *num_watchers = ceph_decode_32(p);
3946         *watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
3947         if (!*watchers)
3948                 return -ENOMEM;
3949
3950         for (i = 0; i < *num_watchers; i++) {
3951                 ret = decode_watcher(p, end, *watchers + i);
3952                 if (ret) {
3953                         kfree(*watchers);
3954                         return ret;
3955                 }
3956         }
3957
3958         return 0;
3959 }
3960
3961 /*
3962  * On success, the caller is responsible for:
3963  *
3964  *     kfree(watchers);
3965  */
3966 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
3967                             struct ceph_object_id *oid,
3968                             struct ceph_object_locator *oloc,
3969                             struct ceph_watch_item **watchers,
3970                             u32 *num_watchers)
3971 {
3972         struct ceph_osd_request *req;
3973         struct page **pages;
3974         int ret;
3975
3976         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
3977         if (!req)
3978                 return -ENOMEM;
3979
3980         ceph_oid_copy(&req->r_base_oid, oid);
3981         ceph_oloc_copy(&req->r_base_oloc, oloc);
3982         req->r_flags = CEPH_OSD_FLAG_READ;
3983
3984         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3985         if (ret)
3986                 goto out_put_req;
3987
3988         pages = ceph_alloc_page_vector(1, GFP_NOIO);
3989         if (IS_ERR(pages)) {
3990                 ret = PTR_ERR(pages);
3991                 goto out_put_req;
3992         }
3993
3994         osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
3995         ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
3996                                                  response_data),
3997                                  pages, PAGE_SIZE, 0, false, true);
3998
3999         ceph_osdc_start_request(osdc, req, false);
4000         ret = ceph_osdc_wait_request(osdc, req);
4001         if (ret >= 0) {
4002                 void *p = page_address(pages[0]);
4003                 void *const end = p + req->r_ops[0].outdata_len;
4004
4005                 ret = decode_watchers(&p, end, watchers, num_watchers);
4006         }
4007
4008 out_put_req:
4009         ceph_osdc_put_request(req);
4010         return ret;
4011 }
4012 EXPORT_SYMBOL(ceph_osdc_list_watchers);
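/*
 * Illustrative sketch, not part of this file: consuming the watcher
 * list and honouring the kfree() contract above.
 *
 *	struct ceph_watch_item *watchers;
 *	u32 num_watchers, i;
 *
 *	ret = ceph_osdc_list_watchers(osdc, oid, oloc, &watchers,
 *				      &num_watchers);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < num_watchers; i++)
 *		...inspect watchers[i].name, .cookie, .addr...
 *	kfree(watchers);
 */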
4013
4014 /*
4015  * Call all pending notify callbacks - for use after a watch is
4016  * unregistered, to make sure no more callbacks for it will be invoked
4017  */
4018 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
4019 {
4020         dout("%s osdc %p\n", __func__, osdc);
4021         flush_workqueue(osdc->notify_wq);
4022 }
4023 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
4024
4025 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
4026 {
4027         down_read(&osdc->lock);
4028         maybe_request_map(osdc);
4029         up_read(&osdc->lock);
4030 }
4031 EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
4032
4033 /*
4034  * Execute an OSD class method on an object.
4035  *
4036  * @flags: CEPH_OSD_FLAG_*
4037  * @resp_len: out param for reply length
4038  */
4039 int ceph_osdc_call(struct ceph_osd_client *osdc,
4040                    struct ceph_object_id *oid,
4041                    struct ceph_object_locator *oloc,
4042                    const char *class, const char *method,
4043                    unsigned int flags,
4044                    struct page *req_page, size_t req_len,
4045                    struct page *resp_page, size_t *resp_len)
4046 {
4047         struct ceph_osd_request *req;
4048         int ret;
4049
4050         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4051         if (!req)
4052                 return -ENOMEM;
4053
4054         ceph_oid_copy(&req->r_base_oid, oid);
4055         ceph_oloc_copy(&req->r_base_oloc, oloc);
4056         req->r_flags = flags;
4057
4058         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4059         if (ret)
4060                 goto out_put_req;
4061
4062         osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
4063         if (req_page)
4064                 osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
4065                                                   0, false, false);
4066         if (resp_page)
4067                 osd_req_op_cls_response_data_pages(req, 0, &resp_page,
4068                                                    PAGE_SIZE, 0, false, false);
4069
4070         ceph_osdc_start_request(osdc, req, false);
4071         ret = ceph_osdc_wait_request(osdc, req);
4072         if (ret >= 0) {
4073                 ret = req->r_ops[0].rval;
4074                 if (resp_page)
4075                         *resp_len = req->r_ops[0].outdata_len;
4076         }
4077
4078 out_put_req:
4079         ceph_osdc_put_request(req);
4080         return ret;
4081 }
4082 EXPORT_SYMBOL(ceph_osdc_call);
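/*
 * Illustrative sketch, not part of this file: invoking a class method
 * with a single reply page.  "lock"/"get_info" is just an example
 * pairing; both request and reply must fit within one page.
 *
 *	struct page *reply_page;
 *	size_t reply_len;
 *
 *	reply_page = alloc_page(GFP_NOIO);
 *	if (!reply_page)
 *		return -ENOMEM;
 *
 *	ret = ceph_osdc_call(osdc, oid, oloc, "lock", "get_info",
 *			     CEPH_OSD_FLAG_READ, NULL, 0,
 *			     reply_page, &reply_len);
 *	...on success, ret is the method's rval and the page holds
 *	   reply_len bytes of output...
 *	__free_page(reply_page);
 */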
4083
4084 /*
4085  * init, shutdown
4086  */
4087 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
4088 {
4089         int err;
4090
4091         dout("init\n");
4092         osdc->client = client;
4093         init_rwsem(&osdc->lock);
4094         osdc->osds = RB_ROOT;
4095         INIT_LIST_HEAD(&osdc->osd_lru);
4096         spin_lock_init(&osdc->osd_lru_lock);
4097         osd_init(&osdc->homeless_osd);
4098         osdc->homeless_osd.o_osdc = osdc;
4099         osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
4100         osdc->last_linger_id = CEPH_LINGER_ID_START;
4101         osdc->linger_requests = RB_ROOT;
4102         osdc->map_checks = RB_ROOT;
4103         osdc->linger_map_checks = RB_ROOT;
4104         INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
4105         INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
4106
4107         err = -ENOMEM;
4108         osdc->osdmap = ceph_osdmap_alloc();
4109         if (!osdc->osdmap)
4110                 goto out;
4111
4112         osdc->req_mempool = mempool_create_slab_pool(10,
4113                                                      ceph_osd_request_cache);
4114         if (!osdc->req_mempool)
4115                 goto out_map;
4116
4117         err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
4118                                 PAGE_SIZE, 10, true, "osd_op");
4119         if (err < 0)
4120                 goto out_mempool;
4121         err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
4122                                 PAGE_SIZE, 10, true, "osd_op_reply");
4123         if (err < 0)
4124                 goto out_msgpool;
4125
4126         err = -ENOMEM;
4127         osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
4128         if (!osdc->notify_wq)
4129                 goto out_msgpool_reply;
4130
4131         schedule_delayed_work(&osdc->timeout_work,
4132                               osdc->client->options->osd_keepalive_timeout);
4133         schedule_delayed_work(&osdc->osds_timeout_work,
4134             round_jiffies_relative(osdc->client->options->osd_idle_ttl));
4135
4136         return 0;
4137
4138 out_msgpool_reply:
4139         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4140 out_msgpool:
4141         ceph_msgpool_destroy(&osdc->msgpool_op);
4142 out_mempool:
4143         mempool_destroy(osdc->req_mempool);
4144 out_map:
4145         ceph_osdmap_destroy(osdc->osdmap);
4146 out:
4147         return err;
4148 }
4149
4150 void ceph_osdc_stop(struct ceph_osd_client *osdc)
4151 {
4152         flush_workqueue(osdc->notify_wq);
4153         destroy_workqueue(osdc->notify_wq);
4154         cancel_delayed_work_sync(&osdc->timeout_work);
4155         cancel_delayed_work_sync(&osdc->osds_timeout_work);
4156
4157         down_write(&osdc->lock);
4158         while (!RB_EMPTY_ROOT(&osdc->osds)) {
4159                 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
4160                                                 struct ceph_osd, o_node);
4161                 close_osd(osd);
4162         }
4163         up_write(&osdc->lock);
4164         WARN_ON(atomic_read(&osdc->homeless_osd.o_ref) != 1);
4165         osd_cleanup(&osdc->homeless_osd);
4166
4167         WARN_ON(!list_empty(&osdc->osd_lru));
4168         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
4169         WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
4170         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
4171         WARN_ON(atomic_read(&osdc->num_requests));
4172         WARN_ON(atomic_read(&osdc->num_homeless));
4173
4174         ceph_osdmap_destroy(osdc->osdmap);
4175         mempool_destroy(osdc->req_mempool);
4176         ceph_msgpool_destroy(&osdc->msgpool_op);
4177         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4178 }
4179
4180 /*
4181  * Read some contiguous pages.  If we cross a stripe boundary, shorten
4182  * *plen.  Return number of bytes read, or error.
4183  */
4184 int ceph_osdc_readpages(struct ceph_osd_client *osdc,
4185                         struct ceph_vino vino, struct ceph_file_layout *layout,
4186                         u64 off, u64 *plen,
4187                         u32 truncate_seq, u64 truncate_size,
4188                         struct page **pages, int num_pages, int page_align)
4189 {
4190         struct ceph_osd_request *req;
4191         int rc = 0;
4192
4193         dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
4194              vino.snap, off, *plen);
4195         req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
4196                                     CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
4197                                     NULL, truncate_seq, truncate_size,
4198                                     false);
4199         if (IS_ERR(req))
4200                 return PTR_ERR(req);
4201
4202         /* it may be a short read due to an object boundary */
4203         osd_req_op_extent_osd_data_pages(req, 0,
4204                                 pages, *plen, page_align, false, false);
4205
4206         dout("readpages  final extent is %llu~%llu (%llu bytes align %d)\n",
4207              off, *plen, *plen, page_align);
4208
4209         rc = ceph_osdc_start_request(osdc, req, false);
4210         if (!rc)
4211                 rc = ceph_osdc_wait_request(osdc, req);
4212
4213         ceph_osdc_put_request(req);
4214         dout("readpages result %d\n", rc);
4215         return rc;
4216 }
4217 EXPORT_SYMBOL(ceph_osdc_readpages);
4218
4219 /*
4220  * do a synchronous write on N pages
4221  */
4222 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
4223                          struct ceph_file_layout *layout,
4224                          struct ceph_snap_context *snapc,
4225                          u64 off, u64 len,
4226                          u32 truncate_seq, u64 truncate_size,
4227                          struct timespec *mtime,
4228                          struct page **pages, int num_pages)
4229 {
4230         struct ceph_osd_request *req;
4231         int rc = 0;
4232         int page_align = off & ~PAGE_MASK;
4233
4234         req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
4235                                     CEPH_OSD_OP_WRITE,
4236                                     CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
4237                                     snapc, truncate_seq, truncate_size,
4238                                     true);
4239         if (IS_ERR(req))
4240                 return PTR_ERR(req);
4241
4242         /* it may be a short write due to an object boundary */
4243         osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
4244                                 false, false);
4245         dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
4246
4247         req->r_mtime = *mtime;
4248         rc = ceph_osdc_start_request(osdc, req, true);
4249         if (!rc)
4250                 rc = ceph_osdc_wait_request(osdc, req);
4251
4252         ceph_osdc_put_request(req);
4253         if (rc == 0)
4254                 rc = len;
4255         dout("writepages result %d\n", rc);
4256         return rc;
4257 }
4258 EXPORT_SYMBOL(ceph_osdc_writepages);
4259
4260 int ceph_osdc_setup(void)
4261 {
4262         size_t size = sizeof(struct ceph_osd_request) +
4263             CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
4264
4265         BUG_ON(ceph_osd_request_cache);
4266         ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
4267                                                    0, 0, NULL);
4268
4269         return ceph_osd_request_cache ? 0 : -ENOMEM;
4270 }
4271 EXPORT_SYMBOL(ceph_osdc_setup);
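/*
 * Illustrative note, not from this file: ceph_osdc_setup() and
 * ceph_osdc_cleanup() are paired by libceph's module init/exit,
 * bracketing all ceph_osdc_init()/ceph_osdc_stop() calls:
 *
 *	ret = ceph_osdc_setup();	(module_init path)
 *	...
 *	ceph_osdc_cleanup();		(module_exit path)
 */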
4272
4273 void ceph_osdc_cleanup(void)
4274 {
4275         BUG_ON(!ceph_osd_request_cache);
4276         kmem_cache_destroy(ceph_osd_request_cache);
4277         ceph_osd_request_cache = NULL;
4278 }
4279 EXPORT_SYMBOL(ceph_osdc_cleanup);
4280
4281 /*
4282  * handle incoming message
4283  */
4284 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
4285 {
4286         struct ceph_osd *osd = con->private;
4287         struct ceph_osd_client *osdc = osd->o_osdc;
4288         int type = le16_to_cpu(msg->hdr.type);
4289
4290         switch (type) {
4291         case CEPH_MSG_OSD_MAP:
4292                 ceph_osdc_handle_map(osdc, msg);
4293                 break;
4294         case CEPH_MSG_OSD_OPREPLY:
4295                 handle_reply(osd, msg);
4296                 break;
4297         case CEPH_MSG_WATCH_NOTIFY:
4298                 handle_watch_notify(osdc, msg);
4299                 break;
4300
4301         default:
4302                 pr_err("received unknown message type %d %s\n", type,
4303                        ceph_msg_type_name(type));
4304         }
4305
4306         ceph_msg_put(msg);
4307 }
4308
4309 /*
4310  * Lookup and return message for incoming reply.  Don't try to do
4311  * anything about a larger-than-preallocated data portion of the
4312  * message at the moment - for now, just skip the message.
4313  */
4314 static struct ceph_msg *get_reply(struct ceph_connection *con,
4315                                   struct ceph_msg_header *hdr,
4316                                   int *skip)
4317 {
4318         struct ceph_osd *osd = con->private;
4319         struct ceph_osd_client *osdc = osd->o_osdc;
4320         struct ceph_msg *m = NULL;
4321         struct ceph_osd_request *req;
4322         int front_len = le32_to_cpu(hdr->front_len);
4323         int data_len = le32_to_cpu(hdr->data_len);
4324         u64 tid = le64_to_cpu(hdr->tid);
4325
4326         down_read(&osdc->lock);
4327         if (!osd_registered(osd)) {
4328                 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
4329                 *skip = 1;
4330                 goto out_unlock_osdc;
4331         }
4332         WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
4333
4334         mutex_lock(&osd->lock);
4335         req = lookup_request(&osd->o_requests, tid);
4336         if (!req) {
4337                 dout("%s osd%d tid %llu unknown, skipping\n", __func__,
4338                      osd->o_osd, tid);
4339                 *skip = 1;
4340                 goto out_unlock_session;
4341         }
4342
4343         ceph_msg_revoke_incoming(req->r_reply);
4344
4345         if (front_len > req->r_reply->front_alloc_len) {
4346                 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
4347                         __func__, osd->o_osd, req->r_tid, front_len,
4348                         req->r_reply->front_alloc_len);
4349                 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
4350                                  false);
4351                 if (!m)
4352                         goto out_unlock_session;
4353                 ceph_msg_put(req->r_reply);
4354                 req->r_reply = m;
4355         }
4356
4357         if (data_len > req->r_reply->data_length) {
4358                 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
4359                         __func__, osd->o_osd, req->r_tid, data_len,
4360                         req->r_reply->data_length);
4361                 m = NULL;
4362                 *skip = 1;
4363                 goto out_unlock_session;
4364         }
4365
4366         m = ceph_msg_get(req->r_reply);
4367         dout("get_reply tid %lld %p\n", tid, m);
4368
4369 out_unlock_session:
4370         mutex_unlock(&osd->lock);
4371 out_unlock_osdc:
4372         up_read(&osdc->lock);
4373         return m;
4374 }
4375
4376 /*
4377  * TODO: switch to a msg-owned pagelist
4378  */
4379 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
4380 {
4381         struct ceph_msg *m;
4382         int type = le16_to_cpu(hdr->type);
4383         u32 front_len = le32_to_cpu(hdr->front_len);
4384         u32 data_len = le32_to_cpu(hdr->data_len);
4385
4386         m = ceph_msg_new(type, front_len, GFP_NOIO, false);
4387         if (!m)
4388                 return NULL;
4389
4390         if (data_len) {
4391                 struct page **pages;
4392                 struct ceph_osd_data osd_data;
4393
4394                 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
4395                                                GFP_NOIO);
4396                 if (IS_ERR(pages)) {
4397                         ceph_msg_put(m);
4398                         return NULL;
4399                 }
4400
4401                 ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
4402                                          false);
4403                 ceph_osdc_msg_data_add(m, &osd_data);
4404         }
4405
4406         return m;
4407 }
4408
4409 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
4410                                   struct ceph_msg_header *hdr,
4411                                   int *skip)
4412 {
4413         struct ceph_osd *osd = con->private;
4414         int type = le16_to_cpu(hdr->type);
4415
4416         *skip = 0;
4417         switch (type) {
4418         case CEPH_MSG_OSD_MAP:
4419         case CEPH_MSG_WATCH_NOTIFY:
4420                 return alloc_msg_with_page_vector(hdr);
4421         case CEPH_MSG_OSD_OPREPLY:
4422                 return get_reply(con, hdr, skip);
4423         default:
4424                 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
4425                         osd->o_osd, type);
4426                 *skip = 1;
4427                 return NULL;
4428         }
4429 }
4430
4431 /*
4432  * Wrappers to refcount the containing ceph_osd struct
4433  */
4434 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
4435 {
4436         struct ceph_osd *osd = con->private;
4437         if (get_osd(osd))
4438                 return con;
4439         return NULL;
4440 }
4441
4442 static void put_osd_con(struct ceph_connection *con)
4443 {
4444         struct ceph_osd *osd = con->private;
4445         put_osd(osd);
4446 }
4447
4448 /*
4449  * authentication
4450  */
4451 /*
4452  * Note: the returned pointer is the address of a structure that's
4453  * managed separately.  Caller must *not* attempt to free it.
4454  */
4455 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
4456                                         int *proto, int force_new)
4457 {
4458         struct ceph_osd *o = con->private;
4459         struct ceph_osd_client *osdc = o->o_osdc;
4460         struct ceph_auth_client *ac = osdc->client->monc.auth;
4461         struct ceph_auth_handshake *auth = &o->o_auth;
4462
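             /*
              * force_new is set when the server rejected our existing
              * authorizer, typically because the ticket it was built from
              * has expired -- throw it away and start from scratch.
              */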
4463         if (force_new && auth->authorizer) {
4464                 ceph_auth_destroy_authorizer(auth->authorizer);
4465                 auth->authorizer = NULL;
4466         }
4467         if (!auth->authorizer) {
4468                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
4469                                                       auth);
4470                 if (ret)
4471                         return ERR_PTR(ret);
4472         } else {
4473                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
4474                                                      auth);
4475                 if (ret)
4476                         return ERR_PTR(ret);
4477         }
4478         *proto = ac->protocol;
4479
4480         return auth;
4481 }
4482
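     /*
      * Mix the server-provided challenge into the authorizer, proving
      * that we hold the session key and guarding the handshake against
      * replays.
      */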
4483 static int add_authorizer_challenge(struct ceph_connection *con,
4484                                     void *challenge_buf, int challenge_buf_len)
4485 {
4486         struct ceph_osd *o = con->private;
4487         struct ceph_osd_client *osdc = o->o_osdc;
4488         struct ceph_auth_client *ac = osdc->client->monc.auth;
4489
4490         return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer,
4491                                             challenge_buf, challenge_buf_len);
4492 }
4493
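     /*
      * Check the server's response to our authorizer -- this is what
      * authenticates the OSD to us, rather than the other way around.
      */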
4494 static int verify_authorizer_reply(struct ceph_connection *con)
4495 {
4496         struct ceph_osd *o = con->private;
4497         struct ceph_osd_client *osdc = o->o_osdc;
4498         struct ceph_auth_client *ac = osdc->client->monc.auth;
4499
4500         return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
4501 }
4502
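     /*
      * The OSD no longer accepts our authorizer: invalidate the ticket
      * behind it and ask the monitor client to renew our authentication.
      */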
4503 static int invalidate_authorizer(struct ceph_connection *con)
4504 {
4505         struct ceph_osd *o = con->private;
4506         struct ceph_osd_client *osdc = o->o_osdc;
4507         struct ceph_auth_client *ac = osdc->client->monc.auth;
4508
4509         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
4510         return ceph_monc_validate_auth(&osdc->client->monc);
4511 }
4512
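     /*
      * Sign outgoing messages and check the signatures on incoming ones
      * using the session key established by the authorizer exchange;
      * both are no-ops when message signing is not in effect.
      */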
4513 static int osd_sign_message(struct ceph_msg *msg)
4514 {
4515         struct ceph_osd *o = msg->con->private;
4516         struct ceph_auth_handshake *auth = &o->o_auth;
4517
4518         return ceph_auth_sign_message(auth, msg);
4519 }
4520
4521 static int osd_check_message_signature(struct ceph_msg *msg)
4522 {
4523         struct ceph_osd *o = msg->con->private;
4524         struct ceph_auth_handshake *auth = &o->o_auth;
4525
4526         return ceph_auth_check_message_signature(auth, msg);
4527 }
4528
4529 static const struct ceph_connection_operations osd_con_ops = {
4530         .get = get_osd_con,
4531         .put = put_osd_con,
4532         .dispatch = dispatch,
4533         .get_authorizer = get_authorizer,
4534         .add_authorizer_challenge = add_authorizer_challenge,
4535         .verify_authorizer_reply = verify_authorizer_reply,
4536         .invalidate_authorizer = invalidate_authorizer,
4537         .alloc_msg = alloc_msg,
4538         .sign_message = osd_sign_message,
4539         .check_message_signature = osd_check_message_signature,
4540         .fault = osd_fault,
4541 };