GNU Linux-libre 4.19.264-gnu1
drivers/nvdimm/dimm_devs.c
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Retrieve the dimm handle and return whether this dimm's bus supports
 * get_config_data commands.
 */
int nvdimm_check_config_data(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        if (!nvdimm->cmd_mask ||
            !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
                if (test_bit(NDD_ALIASING, &nvdimm->flags))
                        return -ENXIO;
                else
                        return -ENOTTY;
        }

        return 0;
}
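
/*
 * Usage sketch (hypothetical caller, not part of this driver): the two
 * error codes are intentionally distinct. -ENOTTY means the dimm simply
 * has no label area, so a caller may proceed label-less; -ENXIO means an
 * aliased dimm that must have label access, so the failure is fatal:
 *
 *	int rc = nvdimm_check_config_data(dev);
 *
 *	if (rc == -ENOTTY)
 *		rc = 0;		(no label support, continue label-less)
 *	else if (rc)
 *		return rc;	(aliased dimm, labels required)
 */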

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
        int rc;

        if (!ndd)
                return -EINVAL;

        rc = nvdimm_check_config_data(ndd->dev);
        if (rc)
                dev_dbg(ndd->dev, "%pf: %s error: %d\n",
                                __builtin_return_address(0), __func__, rc);
        return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver-data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
        struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc;
        int rc = validate_dimm(ndd);
        int cmd_rc = 0;

        if (rc)
                return rc;

        if (cmd->config_size)
                return 0; /* already valid */

        memset(cmd, 0, sizeof(*cmd));
        nd_desc = nvdimm_bus->nd_desc;
        rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                        ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
        if (rc < 0)
                return rc;
        return cmd_rc;
}
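
/*
 * Probe-time ordering sketch (the real caller is the dimm driver's
 * probe path; shown here for illustration only): the geometry must be
 * cached before the label area can be read, because
 * nvdimm_init_config_data() sizes its transfers from ->nsarea:
 *
 *	rc = nvdimm_init_nsarea(ndd);
 *	if (rc)
 *		return rc;
 *	rc = nvdimm_init_config_data(ndd);
 */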

int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        int rc = validate_dimm(ndd), cmd_rc = 0;
        struct nd_cmd_get_config_data_hdr *cmd;
        struct nvdimm_bus_descriptor *nd_desc;
        u32 max_cmd_size, config_size;
        size_t offset;

        if (rc)
                return rc;

        if (ndd->data)
                return 0;

        if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
                        || ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
                dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
                                ndd->nsarea.max_xfer, ndd->nsarea.config_size);
                return -ENXIO;
        }

        ndd->data = kvmalloc(ndd->nsarea.config_size, GFP_KERNEL);
        if (!ndd->data)
                return -ENOMEM;

        max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
        cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        nd_desc = nvdimm_bus->nd_desc;
        for (config_size = ndd->nsarea.config_size, offset = 0;
                        config_size; config_size -= cmd->in_length,
                        offset += cmd->in_length) {
                cmd->in_length = min(config_size, max_cmd_size);
                cmd->in_offset = offset;
                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_GET_CONFIG_DATA, cmd,
                                cmd->in_length + sizeof(*cmd), &cmd_rc);
                if (rc < 0)
                        break;
                if (cmd_rc < 0) {
                        rc = cmd_rc;
                        break;
                }
                memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
        }
        dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
        kfree(cmd);

        return rc;
}
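
/*
 * Worked example of the chunked read above (illustrative numbers): with
 * nsarea.config_size = 131072 and nsarea.max_xfer = 4096, max_cmd_size
 * is min(PAGE_SIZE, 4096) = 4096 on a 4K-page system, so the loop
 * issues 32 ND_CMD_GET_CONFIG_DATA calls, each copying in_length bytes
 * from cmd->out_buf into ndd->data at the running offset.
 */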

int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
                void *buf, size_t len)
{
        size_t max_cmd_size, buf_offset;
        struct nd_cmd_set_config_hdr *cmd;
        int rc = validate_dimm(ndd), cmd_rc = 0;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

        if (rc)
                return rc;

        if (!ndd->data)
                return -ENXIO;

        if (offset + len > ndd->nsarea.config_size)
                return -ENXIO;

        max_cmd_size = min_t(u32, PAGE_SIZE, len);
        max_cmd_size = min_t(u32, max_cmd_size, ndd->nsarea.max_xfer);
        cmd = kzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        for (buf_offset = 0; len; len -= cmd->in_length,
                        buf_offset += cmd->in_length) {
                size_t cmd_size;

                cmd->in_offset = offset + buf_offset;
                cmd->in_length = min(max_cmd_size, len);
                memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

                /* status is output in the last 4-bytes of the command buffer */
                cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
                if (rc < 0)
                        break;
                if (cmd_rc < 0) {
                        rc = cmd_rc;
                        break;
                }
        }
        kfree(cmd);

        return rc;
}
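
/*
 * Command-buffer layout used above, for reference (fields are from
 * struct nd_cmd_set_config_hdr in include/uapi/linux/ndctl.h):
 *
 *	[ in_offset | in_length | in_length bytes of payload | u32 status ]
 *
 * The trailing status word is why cmd_size is computed as
 * sizeof(*cmd) + cmd->in_length + sizeof(u32), and why the kzalloc()
 * above reserves sizeof(u32) beyond the header and payload.
 */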

void nvdimm_set_aliasing(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        set_bit(NDD_ALIASING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        ida_simple_remove(&dimm_ida, nvdimm->id);
        kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
        .name = "nvdimm",
        .release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
        return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
        struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

        WARN_ON(!is_nvdimm(dev));
        return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
        struct nd_region *nd_region = &ndbr->nd_region;
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];

        return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
        /* pmem mapping properties are private to libnvdimm */
        return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
        struct nvdimm *nvdimm = nd_mapping->nvdimm;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

        return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
        struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
        struct device *dev = ndd->dev;
        struct resource *res, *_r;

        dev_dbg(dev, "trace\n");
        nvdimm_bus_lock(dev);
        for_each_dpa_resource_safe(ndd, res, _r)
                nvdimm_free_dpa(ndd, res);
        nvdimm_bus_unlock(dev);

        kvfree(ndd->data);
        kfree(ndd);
        put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
        kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
        if (ndd)
                kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
        return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
        return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
        return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
        if (nvdimm)
                return nvdimm->provider_data;
        return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        int cmd, len = 0;

        if (!nvdimm->cmd_mask)
                return sprintf(buf, "\n");

        for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
                len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
        len += sprintf(buf + len, "\n");
        return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        return sprintf(buf, "%s%s\n",
                        test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
                        test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        /*
         * The state may be in the process of changing; userspace should
         * quiesce probing if it wants a static answer.
         */
        nvdimm_bus_lock(dev);
        nvdimm_bus_unlock(dev);
        return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
                        ? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
        struct device *dev;
        ssize_t rc;
        u32 nfree;

        if (!ndd)
                return -ENXIO;

        dev = ndd->dev;
        nvdimm_bus_lock(dev);
        nfree = nd_label_nfree(ndd);
        /* nfree == 0: the decrement below would wrap the unsigned count */
        if (nfree - 1 > nfree) {
                dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
                nfree = 0;
        } else
                nfree--;
        rc = sprintf(buf, "%d\n", nfree);
        nvdimm_bus_unlock(dev);
        return rc;
}

static ssize_t available_slots_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        ssize_t rc;

        device_lock(dev);
        rc = __available_slots_show(dev_get_drvdata(dev), buf);
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(available_slots);

static struct attribute *nvdimm_attributes[] = {
        &dev_attr_state.attr,
        &dev_attr_flags.attr,
        &dev_attr_commands.attr,
        &dev_attr_available_slots.attr,
        NULL,
};

struct attribute_group nvdimm_attribute_group = {
        .attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);

struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
                const struct attribute_group **groups, unsigned long flags,
                unsigned long cmd_mask, int num_flush,
                struct resource *flush_wpq)
{
        struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
        struct device *dev;

        if (!nvdimm)
                return NULL;

        nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
        if (nvdimm->id < 0) {
                kfree(nvdimm);
                return NULL;
        }
        nvdimm->provider_data = provider_data;
        nvdimm->flags = flags;
        nvdimm->cmd_mask = cmd_mask;
        nvdimm->num_flush = num_flush;
        nvdimm->flush_wpq = flush_wpq;
        atomic_set(&nvdimm->busy, 0);
        dev = &nvdimm->dev;
        dev_set_name(dev, "nmem%d", nvdimm->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = &nvdimm_device_type;
        dev->devt = MKDEV(nvdimm_major, nvdimm->id);
        dev->groups = groups;
        nd_device_register(dev);

        return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);
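
/*
 * Registration sketch (hypothetical bus provider; real providers such
 * as acpi/nfit do the equivalent). The cmd_mask bits shown are the
 * minimum for label-area access:
 *
 *	unsigned long cmd_mask = 0;
 *	struct nvdimm *nvdimm;
 *
 *	set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
 *	set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
 *	set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
 *	nvdimm = nvdimm_create(nvdimm_bus, provider_data, NULL, 0,
 *			cmd_mask, 0, NULL);
 *	if (!nvdimm)
 *		return -ENOMEM;
 *
 * On bus teardown the device is unregistered and nvdimm_release()
 * returns the id to dimm_ida.
 */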

int alias_dpa_busy(struct device *dev, void *data)
{
        resource_size_t map_end, blk_start, new;
        struct blk_alloc_info *info = data;
        struct nd_mapping *nd_mapping;
        struct nd_region *nd_region;
        struct nvdimm_drvdata *ndd;
        struct resource *res;
        int i;

        if (!is_memory(dev))
                return 0;

        nd_region = to_nd_region(dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping = &nd_region->mapping[i];
                if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
                        break;
        }

        if (i >= nd_region->ndr_mappings)
                return 0;

        ndd = to_ndd(nd_mapping);
        map_end = nd_mapping->start + nd_mapping->size - 1;
        blk_start = nd_mapping->start;

        /*
         * In the allocation case ->res is set to free space that we are
         * looking to validate against PMEM aliasing collision rules
         * (i.e. BLK is allocated after all aliased PMEM).
         */
        if (info->res) {
                if (info->res->start >= nd_mapping->start
                                && info->res->start < map_end)
                        /* pass */;
                else
                        return 0;
        }

 retry:
        /*
         * Find the free dpa from the end of the last pmem allocation to
         * the end of the interleave-set mapping.
         */
        for_each_dpa_resource(ndd, res) {
                if (strncmp(res->name, "pmem", 4) != 0)
                        continue;
                if ((res->start >= blk_start && res->start < map_end)
                                || (res->end >= blk_start
                                        && res->end <= map_end)) {
                        new = max(blk_start, min(map_end + 1, res->end + 1));
                        if (new != blk_start) {
                                blk_start = new;
                                goto retry;
                        }
                }
        }

        /* update the free space range with the probed blk_start */
        if (info->res && blk_start > info->res->start) {
                info->res->start = max(info->res->start, blk_start);
                if (info->res->start > info->res->end)
                        info->res->end = info->res->start - 1;
                return 1;
        }

        info->available -= blk_start - nd_mapping->start;

        return 0;
}
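
/*
 * Worked example of the retry scan above (illustrative dpa values):
 * for a mapping of [0x1000, 0x1fff] with "pmem" allocations at
 * [0x1000, 0x13ff] and [0x1400, 0x17ff], the loop advances blk_start
 * to 0x1400 and then 0x1800, i.e. just past the highest aliased PMEM
 * byte, leaving only [0x1800, 0x1fff] eligible for BLK.
 */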

/**
 * nd_blk_available_dpa - account the unused dpa of a BLK region
 * @nd_region: BLK region whose first mapping is the dpa-resource-root + labels
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct blk_alloc_info info = {
                .nd_mapping = nd_mapping,
                .available = nd_mapping->size,
                .res = NULL,
        };
        struct resource *res;

        if (!ndd)
                return 0;

        device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

        /* now account for busy blk allocations in unaliased dpa */
        for_each_dpa_resource(ndd, res) {
                if (strncmp(res->name, "blk", 3) != 0)
                        continue;
                info.available -= resource_size(res);
        }

        return info.available;
}
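
/*
 * Accounting example (illustrative): for a 64MiB BLK mapping where
 * aliased PMEM pushes blk_start 16MiB past the mapping start and 8MiB
 * of "blk" resources are already allocated, the result is
 * 64MiB - 16MiB - 8MiB = 40MiB available.
 */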

/**
 * nd_pmem_max_contiguous_dpa - for the given dimm+region, return the max
 *                              contiguous unallocated dpa range
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
                                           struct nd_mapping *nd_mapping)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nvdimm_bus *nvdimm_bus;
        resource_size_t max = 0;
        struct resource *res;

        /* if a dimm is disabled the available capacity is zero */
        if (!ndd)
                return 0;

        nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
                return 0;
        for_each_dpa_resource(ndd, res) {
                if (strcmp(res->name, "pmem-reserve") != 0)
                        continue;
                if (resource_size(res) > max)
                        max = resource_size(res);
        }
        release_free_pmem(nvdimm_bus, nd_mapping);
        return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
        resource_size_t map_start, map_end, busy = 0, available, blk_start;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;
        const char *reason;

        if (!ndd)
                return 0;

        map_start = nd_mapping->start;
        map_end = map_start + nd_mapping->size - 1;
        blk_start = max(map_start, map_end + 1 - *overlap);
        for_each_dpa_resource(ndd, res) {
                if (res->start >= map_start && res->start < map_end) {
                        if (strncmp(res->name, "blk", 3) == 0)
                                blk_start = min(blk_start,
                                                max(map_start, res->start));
                        else if (res->end > map_end) {
                                reason = "misaligned to iset";
                                goto err;
                        } else
                                busy += resource_size(res);
                } else if (res->end >= map_start && res->end <= map_end) {
                        if (strncmp(res->name, "blk", 3) == 0) {
                                /*
                                 * If a BLK allocation overlaps the start of
                                 * PMEM the entire interleave set may now only
                                 * be used for BLK.
                                 */
                                blk_start = map_start;
                        } else
                                busy += resource_size(res);
                } else if (map_start > res->start && map_start < res->end) {
                        /* total eclipse of the mapping */
                        busy += nd_mapping->size;
                        blk_start = map_start;
                }
        }

        *overlap = map_end + 1 - blk_start;
        available = blk_start - map_start;
        if (busy < available)
                return available - busy;
        return 0;

 err:
        nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
        return 0;
}
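
/*
 * Worked example (illustrative dpa values): with map_start = 0, a
 * mapping size of 100, *overlap = 0 on entry, and a single "blk"
 * resource at [90, 99], blk_start is pulled down from 100 to 90, so
 * *overlap is returned as 10 and the available PMEM capacity is
 * 90 - 0 = 90 (no other busy resources).
 */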

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        kfree(res->name);
        __release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id, resource_size_t start,
                resource_size_t n)
{
        char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
        struct resource *res;

        if (!name)
                return NULL;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        res = __request_region(&ndd->dpa, start, n, name, 0);
        if (!res)
                kfree(name);
        return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id)
{
        resource_size_t allocated = 0;
        struct resource *res;

        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id->id) == 0)
                        allocated += resource_size(res);

        return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
        int *count = c;

        if (is_nvdimm(dev))
                (*count)++;
        return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
        int count = 0;

        /* Flush any possible dimm registration failures */
        nd_synchronize();

        device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
        dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
        if (count != dimm_count)
                return -ENXIO;
        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
        ida_destroy(&dimm_ida);
}