4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2015, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/lov/lov_pack.c
34 * (Un)packing of OST/MDS requests
36 * Author: Andreas Dilger <adilger@clusterfs.com>
39 #define DEBUG_SUBSYSTEM S_LOV
41 #include "../include/lustre_net.h"
42 #include "../include/obd.h"
43 #include "../include/obd_class.h"
44 #include "../include/obd_support.h"
45 #include "../include/lustre/lustre_user.h"
47 #include "lov_internal.h"
48 #include "lov_cl_internal.h"
/*
 * lov_dump_lmm_common() - CDEBUG-print the header fields shared by all
 * lov_mds_md variants: object id, magic, pattern, stripe size/count and
 * layout generation.
 *
 * @lmmp points at an on-disk LOV EA, whose fields are little-endian;
 * hence the lmm_oi_le_to_cpu()/le*_to_cpu() conversions before printing.
 * @level is the CDEBUG message mask.
 *
 * NOTE(review): this extract is missing some original lines here (the
 * opening brace and the local "oi" declaration); visible code is kept
 * byte-identical.
 */
50 void lov_dump_lmm_common(int level, void *lmmp)
52 struct lov_mds_md *lmm = lmmp;
/* convert the LE object id into a CPU-order copy for POSTID() below */
55 lmm_oi_le_to_cpu(&oi, &lmm->lmm_oi);
56 CDEBUG(level, "objid "DOSTID", magic 0x%08x, pattern %#x\n",
57 POSTID(&oi), le32_to_cpu(lmm->lmm_magic),
58 le32_to_cpu(lmm->lmm_pattern));
59 CDEBUG(level, "stripe_size %u, stripe_count %u, layout_gen %u\n",
60 le32_to_cpu(lmm->lmm_stripe_size),
61 le16_to_cpu(lmm->lmm_stripe_count),
62 le16_to_cpu(lmm->lmm_layout_gen));
/*
 * lov_dump_lmm_objects() - CDEBUG-print the per-stripe OST object array
 * that follows a lov_mds_md header (one lov_ost_data per stripe).
 *
 * A stripe_count above LOV_V1_INSANE_STRIPE_COUNT is reported as bad and,
 * presumably, the dump is skipped — the early-return line is not visible
 * in this extract (TODO confirm against the full file).
 */
65 static void lov_dump_lmm_objects(int level, struct lov_ost_data *lod,
/* sanity-limit: refuse to walk an implausibly large on-disk array */
70 if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
71 CDEBUG(level, "bad stripe_count %u > max_stripe_count %u\n",
72 stripe_count, LOV_V1_INSANE_STRIPE_COUNT);
/* one entry per stripe; fields are LE on disk, converted before print */
76 for (i = 0; i < stripe_count; ++i, ++lod) {
79 ostid_le_to_cpu(&lod->l_ost_oi, &oi);
80 CDEBUG(level, "stripe %u idx %u subobj "DOSTID"\n", i,
81 le32_to_cpu(lod->l_ost_idx), POSTID(&oi));
/*
 * lov_dump_lmm_v1() - dump a LOV_MAGIC_V1 EA: common header fields
 * followed by the per-stripe object array.
 */
85 void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm)
87 lov_dump_lmm_common(level, lmm);
88 lov_dump_lmm_objects(level, lmm->lmm_objects,
89 le16_to_cpu(lmm->lmm_stripe_count));
/*
 * lov_dump_lmm_v3() - dump a LOV_MAGIC_V3 EA: same as v1 plus the pool
 * name that v3 adds between the header and the object array.
 */
92 void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm)
94 lov_dump_lmm_common(level, lmm);
95 CDEBUG(level, "pool_name "LOV_POOLNAMEF"\n", lmm->lmm_pool_name);
96 lov_dump_lmm_objects(level, lmm->lmm_objects,
97 le16_to_cpu(lmm->lmm_stripe_count));
100 /* Pack LOV object metadata for disk storage. It is packed in LE byte
101 * order and is opaque to the networking layer.
103 * XXX In the future, this will be enhanced to get the EA size from the
104 * underlying OSC device(s) to get their EA sizes so we can stack
105 * LOVs properly. For now lov_mds_md_size() just assumes one u64
 * per stripe.
/*
 * lov_obd_packmd() - pack in-memory striping state (@lsm) into an on-disk
 * LOV EA (*@lmmp), little-endian throughout.
 *
 * From the visible code, the magic is chosen in priority order: from
 * @lsm if given, else from an existing *@lmmp, else the LOV_MAGIC
 * default; only LOV_MAGIC_V1/LOV_MAGIC_V3 are accepted.  The stripe
 * count (and hence lmm_size) comes from the lsm, from the caller's
 * existing buffer, or — for a released/sizing-only request — from the
 * active target count.  The buffer is allocated with libcfs_kvzalloc()
 * and then filled field by field with cpu_to_le*() conversions, ending
 * with one lov_ost_data_v1 entry per stripe.
 *
 * NOTE(review): several original lines (the if/else heads selecting the
 * magic, error returns, and the NULL-lsm early exit) were dropped from
 * this extract; comments below only describe what is visible.
 */
108 int lov_obd_packmd(struct lov_obd *lov, struct lov_mds_md **lmmp,
109 struct lov_stripe_md *lsm)
111 struct lov_mds_md_v1 *lmmv1;
112 struct lov_mds_md_v3 *lmmv3;
114 struct lov_ost_data_v1 *lmm_objects;
115 int lmm_size, lmm_magic;
/* magic from the lsm when one is supplied ... */
120 lmm_magic = lsm->lsm_magic;
/* ... else from the caller's pre-existing EA buffer ... */
123 lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
125 /* lsm == NULL and lmmp == NULL */
126 lmm_magic = LOV_MAGIC;
/* only v1 and v3 layouts can be packed by this function */
129 if ((lmm_magic != LOV_MAGIC_V1) &&
130 (lmm_magic != LOV_MAGIC_V3)) {
131 CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
132 lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
137 /* If we are just sizing the EA, limit the stripe count
138 * to the actual number of OSTs in this filesystem.
/* clamp the requested count and write the clamped value back to lsm */
141 stripe_count = lov_get_stripecnt(lov, lmm_magic,
142 lsm->lsm_stripe_count);
143 lsm->lsm_stripe_count = stripe_count;
144 } else if (!lsm_is_released(lsm)) {
145 stripe_count = lsm->lsm_stripe_count;
151 * To calculate maximum easize by active targets at present,
152 * which is exactly the maximum easize to be seen by LOV
154 stripe_count = lov->desc.ld_active_tgt_count;
157 /* XXX LOV STACKING call into osc for sizes */
158 lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
/* reuse the size implied by the caller's existing buffer */
164 stripe_count = le16_to_cpu((*lmmp)->lmm_stripe_count);
165 lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
/* zeroed, possibly-vmalloc'd allocation sized for all stripes */
172 *lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
177 CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d\n",
178 lmm_magic, lmm_size);
/* v1 and v3 share a common header, so one cast serves both */
181 lmmv3 = (struct lov_mds_md_v3 *)*lmmp;
182 if (lmm_magic == LOV_MAGIC_V3)
183 lmmv3->lmm_magic = cpu_to_le32(LOV_MAGIC_V3);
185 lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
190 /* lmmv1 and lmmv3 point to the same struct and have the
/* header fields: everything stored LE on disk */
193 lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
194 lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
195 lmmv1->lmm_stripe_count = cpu_to_le16(stripe_count);
196 lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
197 lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);
198 if (lsm->lsm_magic == LOV_MAGIC_V3) {
/* cplen >= size means strlcpy truncated the pool name — error path
 * (the return itself is not visible in this extract) */
199 cplen = strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
200 sizeof(lmmv3->lmm_pool_name));
201 if (cplen >= sizeof(lmmv3->lmm_pool_name))
203 lmm_objects = lmmv3->lmm_objects;
205 lmm_objects = lmmv1->lmm_objects;
/* pack one on-disk object entry per in-memory stripe */
208 for (i = 0; i < stripe_count; i++) {
209 struct lov_oinfo *loi = lsm->lsm_oinfo[i];
210 /* XXX LOV STACKING call down to osc_packmd() to do packing */
/* a zero object id would mean an unallocated stripe — must not happen */
211 LASSERTF(ostid_id(&loi->loi_oi) != 0, "lmm_oi "DOSTID
212 " stripe %u/%u idx %u\n", POSTID(&lmmv1->lmm_oi),
213 i, stripe_count, loi->loi_ost_idx);
214 ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
215 lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
216 lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
/*
 * lov_packmd() - OBD-method wrapper: resolve the lov_obd from the export
 * and delegate the actual packing to lov_obd_packmd().
 */
222 int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
223 struct lov_stripe_md *lsm)
225 struct obd_device *obd = class_exp2obd(exp);
226 struct lov_obd *lov = &obd->u.lov;
228 return lov_obd_packmd(lov, lmmp, lsm);
/*
 * lov_get_stripecnt() - compute the stripe count actually usable for a
 * layout of the given @magic, starting from the caller's @stripe_count.
 *
 * A zero/unset request presumably falls back to the filesystem default
 * (the guarding condition line is not visible in this extract — TODO
 * confirm), and the result is clamped first to the active target count
 * and then to the maximum the MDS can store in one EA.
 */
232 __u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
234 __u32 max_stripes = LOV_MAX_STRIPE_COUNT_OLD;
/* fall back to the configured default, capped at the active OST count */
237 stripe_count = lov->desc.ld_default_stripe_count;
238 if (stripe_count > lov->desc.ld_active_tgt_count)
239 stripe_count = lov->desc.ld_active_tgt_count;
243 /* stripe count is based on whether ldiskfs can handle
/* if the MDS advertised its max EA size, derive a tighter stripe limit */
246 if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE &&
247 lov->lov_ocd.ocd_max_easize)
248 max_stripes = lov_mds_md_max_stripe_count(
249 lov->lov_ocd.ocd_max_easize, magic);
251 if (stripe_count > max_stripes)
252 stripe_count = max_stripes;
/*
 * lov_verify_lmm() - validate an on-disk LOV EA before unpacking.
 *
 * The first __u32 of the buffer is the (little-endian) magic; it is used
 * to look up the per-layout operations table via lsm_op_find().  An
 * unknown magic dumps the raw EA bytes (%*phN hex dump) and fails;
 * otherwise the layout-specific ->lsm_lmm_verify() hook does the real
 * checking (its remaining arguments are not visible in this extract).
 */
257 static int lov_verify_lmm(void *lmm, int lmm_bytes, __u16 *stripe_count)
261 if (!lsm_op_find(le32_to_cpu(*(__u32 *)lmm))) {
262 CERROR("bad disk LOV MAGIC: 0x%08X; dumping LMM (size=%d):\n",
263 le32_to_cpu(*(__u32 *)lmm), lmm_bytes);
264 CERROR("%*phN\n", lmm_bytes, lmm);
267 rc = lsm_op_find(le32_to_cpu(*(__u32 *)lmm))->lsm_lmm_verify(lmm,
/*
 * lov_alloc_memmd() - allocate and initialise an in-memory striping
 * descriptor (*@lsmp) for @stripe_count stripes.
 *
 * lsm_alloc_plain() does the allocation (including the lsm_oinfo[]
 * array); this function then fills in refcount, lock, magic, pattern,
 * pool name (empty), layout generation (0) and a maxbytes value scaled
 * by the stripe count.  Each stripe's lov_oinfo is reset via loi_init().
 * Presumably returns the lsm_size on success — the visible code does not
 * show the return statement (TODO confirm).
 */
273 int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
274 int pattern, int magic)
278 CDEBUG(D_INFO, "alloc lsm, stripe_count %d\n", stripe_count);
280 *lsmp = lsm_alloc_plain(stripe_count, &lsm_size);
282 CERROR("can't allocate lsmp stripe_count %d\n", stripe_count);
/* caller holds the first reference */
286 atomic_set(&(*lsmp)->lsm_refc, 1);
287 spin_lock_init(&(*lsmp)->lsm_lock);
288 (*lsmp)->lsm_magic = magic;
289 (*lsmp)->lsm_stripe_count = stripe_count;
290 (*lsmp)->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES * stripe_count;
291 (*lsmp)->lsm_pattern = pattern;
292 (*lsmp)->lsm_pool_name[0] = '\0';
293 (*lsmp)->lsm_layout_gen = 0;
/* ~0 marks stripe 0's OST index as not-yet-assigned */
294 if (stripe_count > 0)
295 (*lsmp)->lsm_oinfo[0]->loi_ost_idx = ~0;
297 for (i = 0; i < stripe_count; i++)
298 loi_init((*lsmp)->lsm_oinfo[i]);
/*
 * lov_free_memmd() - drop one reference on *@lsmp; when the refcount
 * reaches zero the layout-specific ->lsm_free() hook releases it (the
 * refc check between the decrement and the free is not visible in this
 * extract).  The LASSERT guards against over-release.
 */
303 int lov_free_memmd(struct lov_stripe_md **lsmp)
305 struct lov_stripe_md *lsm = *lsmp;
309 LASSERT(atomic_read(&lsm->lsm_refc) > 0);
310 refc = atomic_dec_return(&lsm->lsm_refc);
312 lsm_op_find(lsm->lsm_magic)->lsm_free(lsm);
317 /* Unpack LOV object metadata from disk storage. It is packed in LE byte
318 * order and is opaque to the networking layer.
 *
 * From the visible code: when @lmm is supplied its magic/pattern/stripe
 * count are verified and used; otherwise defaults are chosen
 * (lov_get_stripecnt() + RAID0).  With no @lsmp the function only
 * returns the in-memory size; with an @lsmp but no @lmm it frees any
 * existing lsm; otherwise it allocates a fresh lsm and lets the
 * layout-specific ->lsm_unpackmd() hook fill it, freeing it again on
 * failure.  NOTE(review): the if/else heads driving these branches were
 * dropped from this extract; code below is byte-identical.
 */
320 int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
321 struct lov_mds_md *lmm, int lmm_bytes)
323 struct obd_device *obd = class_exp2obd(exp);
324 struct lov_obd *lov = &obd->u.lov;
325 int rc = 0, lsm_size;
330 /* If passed an MDS struct use values from there, otherwise defaults */
332 rc = lov_verify_lmm(lmm, lmm_bytes, &stripe_count);
/* on-disk fields are LE */
335 magic = le32_to_cpu(lmm->lmm_magic);
336 pattern = le32_to_cpu(lmm->lmm_pattern);
/* no EA given: fall back to defaults */
339 stripe_count = lov_get_stripecnt(lov, magic, 0);
340 pattern = LOV_PATTERN_RAID0;
343 /* If we aren't passed an lsmp struct, we just want the size */
345 /* XXX LOV STACKING call into osc for sizes */
347 return lov_stripe_md_size(stripe_count);
349 /* If we are passed an allocated struct but nothing to unpack, free */
351 lov_free_memmd(lsmp);
355 lsm_size = lov_alloc_memmd(lsmp, stripe_count, pattern, magic);
359 /* If we are passed a pointer but nothing to unpack, we only alloc */
/* layout-specific unpack; undo the allocation if it fails */
363 rc = lsm_op_find(magic)->lsm_unpackmd(lov, *lsmp, lmm);
365 lov_free_memmd(lsmp);
372 /* Retrieve object striping information.
374 * @lump is a pointer to an in-core struct with lmm_ost_count indicating
375 * the maximum number of OST indices which will fit in the user buffer.
376 * lmm_magic must be LOV_USER_MAGIC.
378 int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
379 struct lov_user_md __user *lump)
382 * XXX huge struct allocated on stack.
384 /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
386 struct lov_user_md_v3 lum;
387 struct lov_mds_md *lmmk = NULL;
388 int rc, lmmk_size, lmm_size;
394 /* we only need the header part from user space to get lmm_magic and
395 * lmm_stripe_count, (the header part is common to v1 and v3)
397 lum_size = sizeof(struct lov_user_md_v1);
398 if (copy_from_user(&lum, lump, lum_size)) {
402 if (lum.lmm_magic != LOV_USER_MAGIC_V1 &&
403 lum.lmm_magic != LOV_USER_MAGIC_V3 &&
404 lum.lmm_magic != LOV_USER_MAGIC_SPECIFIC) {
409 if (lum.lmm_stripe_count &&
410 (lum.lmm_stripe_count < lsm->lsm_stripe_count)) {
411 /* Return right size of stripe to user */
412 lum.lmm_stripe_count = lsm->lsm_stripe_count;
413 rc = copy_to_user(lump, &lum, lum_size);
417 lov = lu2lov_dev(obj->lo_cl.co_lu.lo_dev)->ld_lov;
418 rc = lov_obd_packmd(lov, &lmmk, lsm);
425 /* FIXME: Bug 1185 - copy fields properly when structs change */
426 /* struct lov_user_md_v3 and struct lov_mds_md_v3 must be the same */
427 CLASSERT(sizeof(lum) == sizeof(struct lov_mds_md_v3));
428 CLASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lmmk->lmm_objects[0]));
430 if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) &&
431 ((lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) ||
432 (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)))) {
433 lustre_swab_lov_mds_md(lmmk);
434 lustre_swab_lov_user_md_objects(
435 (struct lov_user_ost_data *)lmmk->lmm_objects,
436 lmmk->lmm_stripe_count);
438 if (lum.lmm_magic == LOV_USER_MAGIC) {
439 /* User request for v1, we need skip lmm_pool_name */
440 if (lmmk->lmm_magic == LOV_MAGIC_V3) {
441 memmove(((struct lov_mds_md_v1 *)lmmk)->lmm_objects,
442 ((struct lov_mds_md_v3 *)lmmk)->lmm_objects,
443 lmmk->lmm_stripe_count *
444 sizeof(struct lov_ost_data_v1));
445 lmm_size -= LOV_MAXPOOLNAME;
448 /* if v3 we just have to update the lum_size */
449 lum_size = sizeof(struct lov_user_md_v3);
452 /* User wasn't expecting this many OST entries */
453 if (lum.lmm_stripe_count == 0) {
455 } else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) {
460 * Have a difference between lov_mds_md & lov_user_md.
461 * So we have to re-order the data before copy to user.
463 lum.lmm_stripe_count = lmmk->lmm_stripe_count;
464 lum.lmm_layout_gen = lmmk->lmm_layout_gen;
465 ((struct lov_user_md *)lmmk)->lmm_layout_gen = lum.lmm_layout_gen;
466 ((struct lov_user_md *)lmmk)->lmm_stripe_count = lum.lmm_stripe_count;
467 if (copy_to_user(lump, lmmk, lmm_size))