 1/*
2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13#include <linux/module.h>
14#include <linux/device.h>
15#include <linux/slab.h>
16#include <linux/nd.h>
 17#include "nd-core.h"
 18#include "nd.h"
19
20static void namespace_io_release(struct device *dev)
21{
22 struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
23
24 kfree(nsio);
25}
26
 27static void namespace_pmem_release(struct device *dev)
28{
29 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
30
31 kfree(nspm->alt_name);
32 kfree(nspm->uuid);
33 kfree(nspm);
34}
35
36static void namespace_blk_release(struct device *dev)
37{
 38 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
39 struct nd_region *nd_region = to_nd_region(dev->parent);
40
41 if (nsblk->id >= 0)
42 ida_simple_remove(&nd_region->ns_ida, nsblk->id);
43 kfree(nsblk->alt_name);
44 kfree(nsblk->uuid);
45 kfree(nsblk->res);
46 kfree(nsblk);
 47}
48
 49static struct device_type namespace_io_device_type = {
50 .name = "nd_namespace_io",
51 .release = namespace_io_release,
52};
53
 54static struct device_type namespace_pmem_device_type = {
55 .name = "nd_namespace_pmem",
56 .release = namespace_pmem_release,
57};
58
59static struct device_type namespace_blk_device_type = {
60 .name = "nd_namespace_blk",
61 .release = namespace_blk_release,
62};
63
64static bool is_namespace_pmem(struct device *dev)
65{
66 return dev ? dev->type == &namespace_pmem_device_type : false;
67}
68
69static bool is_namespace_blk(struct device *dev)
70{
71 return dev ? dev->type == &namespace_blk_device_type : false;
72}
73
74static bool is_namespace_io(struct device *dev)
75{
76 return dev ? dev->type == &namespace_io_device_type : false;
77}
78
 79const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
80 char *name)
81{
82 struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
83 const char *suffix = "";
84
85 if (ndns->claim && is_nd_btt(ndns->claim))
86 suffix = "s";
87
88 if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev))
89 sprintf(name, "pmem%d%s", nd_region->id, suffix);
90 else if (is_namespace_blk(&ndns->dev)) {
91 struct nd_namespace_blk *nsblk;
92
93 nsblk = to_nd_namespace_blk(&ndns->dev);
94 sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id, suffix);
95 } else {
96 return NULL;
97 }
98
99 return name;
100}
101EXPORT_SYMBOL(nvdimm_namespace_disk_name);
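/*
 * For illustration (ids are hypothetical, assuming region id 0 and blk id 1):
 * a pmem or io namespace in region0 is named "pmem0", a blk namespace
 * "ndblk0.1", and a namespace claimed by a BTT gains an "s" suffix,
 * e.g. "pmem0s", per the format strings above.
 */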
102
 103static ssize_t nstype_show(struct device *dev,
104 struct device_attribute *attr, char *buf)
105{
106 struct nd_region *nd_region = to_nd_region(dev->parent);
107
108 return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
109}
110static DEVICE_ATTR_RO(nstype);
111
 112static ssize_t __alt_name_store(struct device *dev, const char *buf,
113 const size_t len)
114{
115 char *input, *pos, *alt_name, **ns_altname;
116 ssize_t rc;
117
118 if (is_namespace_pmem(dev)) {
119 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
120
121 ns_altname = &nspm->alt_name;
122 } else if (is_namespace_blk(dev)) {
 123 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
 124
 125 ns_altname = &nsblk->alt_name;
 126 } else
127 return -ENXIO;
128
 129 if (dev->driver || to_ndns(dev)->claim)
 130 return -EBUSY;
131
132 input = kmemdup(buf, len + 1, GFP_KERNEL);
133 if (!input)
134 return -ENOMEM;
135
136 input[len] = '\0';
137 pos = strim(input);
138 if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
139 rc = -EINVAL;
140 goto out;
141 }
142
143 alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
144 if (!alt_name) {
145 rc = -ENOMEM;
146 goto out;
147 }
148 kfree(*ns_altname);
149 *ns_altname = alt_name;
150 sprintf(*ns_altname, "%s", pos);
151 rc = len;
152
153out:
154 kfree(input);
155 return rc;
156}
157
 158static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
 159{
 160 struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
 161 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
162 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
163 struct nd_label_id label_id;
164 resource_size_t size = 0;
165 struct resource *res;
166
167 if (!nsblk->uuid)
168 return 0;
169 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
170 for_each_dpa_resource(ndd, res)
171 if (strcmp(res->name, label_id.id) == 0)
172 size += resource_size(res);
173 return size;
174}
175
 176static int nd_namespace_label_update(struct nd_region *nd_region,
 177 struct device *dev)
 178{
 179 dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
 180 "namespace must be idle during label update\n");
 181 if (dev->driver || to_ndns(dev)->claim)
 182 return 0;
183
184 /*
185 * Only allow label writes that will result in a valid namespace
186 * or deletion of an existing namespace.
187 */
188 if (is_namespace_pmem(dev)) {
189 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 190 resource_size_t size = resource_size(&nspm->nsio.res);
 191
192 if (size == 0 && nspm->uuid)
193 /* delete allocation */;
194 else if (!nspm->uuid)
195 return 0;
196
197 return nd_pmem_namespace_label_update(nd_region, nspm, size);
198 } else if (is_namespace_blk(dev)) {
 199 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
200 resource_size_t size = nd_namespace_blk_size(nsblk);
201
202 if (size == 0 && nsblk->uuid)
203 /* delete allocation */;
204 else if (!nsblk->uuid || !nsblk->lbasize)
205 return 0;
206
207 return nd_blk_namespace_label_update(nd_region, nsblk, size);
 208 } else
209 return -ENXIO;
210}
211
 212static ssize_t alt_name_store(struct device *dev,
 213 struct device_attribute *attr, const char *buf, size_t len)
 214{
 215 struct nd_region *nd_region = to_nd_region(dev->parent);
 216 ssize_t rc;
217
218 device_lock(dev);
219 nvdimm_bus_lock(dev);
220 wait_nvdimm_bus_probe_idle(dev);
221 rc = __alt_name_store(dev, buf, len);
 222 if (rc >= 0)
 223 rc = nd_namespace_label_update(nd_region, dev);
 224 dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
225 nvdimm_bus_unlock(dev);
226 device_unlock(dev);
227
 228 return rc < 0 ? rc : len;
 229}
230
231static ssize_t alt_name_show(struct device *dev,
232 struct device_attribute *attr, char *buf)
233{
234 char *ns_altname;
235
236 if (is_namespace_pmem(dev)) {
237 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
238
239 ns_altname = nspm->alt_name;
240 } else if (is_namespace_blk(dev)) {
 241 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
 242
 243 ns_altname = nsblk->alt_name;
 244 } else
245 return -ENXIO;
246
247 return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
248}
249static DEVICE_ATTR_RW(alt_name);
250
251static int scan_free(struct nd_region *nd_region,
252 struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
253 resource_size_t n)
254{
255 bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
256 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
257 int rc = 0;
258
259 while (n) {
260 struct resource *res, *last;
261 resource_size_t new_start;
262
263 last = NULL;
264 for_each_dpa_resource(ndd, res)
265 if (strcmp(res->name, label_id->id) == 0)
266 last = res;
267 res = last;
268 if (!res)
269 return 0;
270
271 if (n >= resource_size(res)) {
272 n -= resource_size(res);
273 nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
274 nvdimm_free_dpa(ndd, res);
275 /* retry with last resource deleted */
276 continue;
277 }
278
279 /*
280 * Keep BLK allocations relegated to high DPA as much as
281 * possible
282 */
283 if (is_blk)
284 new_start = res->start + n;
285 else
286 new_start = res->start;
287
288 rc = adjust_resource(res, new_start, resource_size(res) - n);
 289 if (rc == 0)
 290 res->flags |= DPA_RESOURCE_ADJUSTED;
 291 nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
292 break;
293 }
294
295 return rc;
296}
297
298/**
299 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
300 * @nd_region: the set of dimms to reclaim @n bytes from
301 * @label_id: unique identifier for the namespace consuming this dpa range
302 * @n: number of bytes per-dimm to release
303 *
304 * Assumes resources are ordered. Starting from the end try to
305 * adjust_resource() the allocation to @n, but if @n is larger than the
306 * allocation delete it and find the 'new' last allocation in the label
307 * set.
308 */
309static int shrink_dpa_allocation(struct nd_region *nd_region,
310 struct nd_label_id *label_id, resource_size_t n)
311{
312 int i;
313
314 for (i = 0; i < nd_region->ndr_mappings; i++) {
315 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
316 int rc;
317
318 rc = scan_free(nd_region, nd_mapping, label_id, n);
319 if (rc)
320 return rc;
321 }
322
323 return 0;
324}
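/*
 * Worked example (hypothetical numbers): shrinking a namespace on a
 * 2-way interleaved region by 128M means __size_store() passes
 * n = 64M here, and each mapping's last extent for this label_id is
 * trimmed by adjust_resource(), or deleted and the scan retried when
 * it is smaller than n.
 */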
325
326static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
327 struct nd_region *nd_region, struct nd_mapping *nd_mapping,
328 resource_size_t n)
329{
330 bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
331 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
332 resource_size_t first_dpa;
333 struct resource *res;
334 int rc = 0;
335
336 /* allocate blk from highest dpa first */
337 if (is_blk)
338 first_dpa = nd_mapping->start + nd_mapping->size - n;
339 else
340 first_dpa = nd_mapping->start;
341
342 /* first resource allocation for this label-id or dimm */
343 res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
344 if (!res)
345 rc = -EBUSY;
346
347 nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
348 return rc ? n : 0;
349}
350
 351static bool space_valid(bool is_pmem, bool is_reserve,
 352 struct nd_label_id *label_id, struct resource *res)
 353{
 354 /*
 355 * For BLK-space any space is valid, for PMEM-space, it must be
 356 * contiguous with an existing allocation unless we are
 357 * reserving pmem.
 358 */
 359 if (is_reserve || !is_pmem)
 360 return true;
361 if (!res || strcmp(res->name, label_id->id) == 0)
362 return true;
363 return false;
364}
365
366enum alloc_loc {
367 ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
368};
369
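/*
 * scan_allocate() walks a dimm's DPA resource tree looking for free
 * space before the first allocation, between allocations, or after the
 * last one (ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER).  It grows an
 * existing extent for @label_id where the free space is adjacent to
 * one, otherwise it inserts a new extent; PMEM stays contiguous while
 * BLK is taken from the high end of the mapping first.  The return
 * value is the number of bytes that could not be allocated.
 */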
370static resource_size_t scan_allocate(struct nd_region *nd_region,
371 struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
372 resource_size_t n)
373{
374 resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
 375 bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
 376 bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
377 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
378 const resource_size_t to_allocate = n;
379 struct resource *res;
380 int first;
381
382 retry:
383 first = 0;
384 for_each_dpa_resource(ndd, res) {
385 resource_size_t allocate, available = 0, free_start, free_end;
386 struct resource *next = res->sibling, *new_res = NULL;
387 enum alloc_loc loc = ALLOC_ERR;
388 const char *action;
389 int rc = 0;
390
391 /* ignore resources outside this nd_mapping */
392 if (res->start > mapping_end)
393 continue;
394 if (res->end < nd_mapping->start)
395 continue;
396
397 /* space at the beginning of the mapping */
398 if (!first++ && res->start > nd_mapping->start) {
399 free_start = nd_mapping->start;
400 available = res->start - free_start;
 401 if (space_valid(is_pmem, is_reserve, label_id, NULL))
 402 loc = ALLOC_BEFORE;
403 }
404
405 /* space between allocations */
406 if (!loc && next) {
407 free_start = res->start + resource_size(res);
408 free_end = min(mapping_end, next->start - 1);
 409 if (space_valid(is_pmem, is_reserve, label_id, res)
 410 && free_start < free_end) {
411 available = free_end + 1 - free_start;
412 loc = ALLOC_MID;
413 }
414 }
415
416 /* space at the end of the mapping */
417 if (!loc && !next) {
418 free_start = res->start + resource_size(res);
419 free_end = mapping_end;
 420 if (space_valid(is_pmem, is_reserve, label_id, res)
 421 && free_start < free_end) {
422 available = free_end + 1 - free_start;
423 loc = ALLOC_AFTER;
424 }
425 }
426
427 if (!loc || !available)
428 continue;
429 allocate = min(available, n);
430 switch (loc) {
431 case ALLOC_BEFORE:
432 if (strcmp(res->name, label_id->id) == 0) {
433 /* adjust current resource up */
 434 if (is_pmem && !is_reserve)
 435 return n;
436 rc = adjust_resource(res, res->start - allocate,
437 resource_size(res) + allocate);
438 action = "cur grow up";
439 } else
440 action = "allocate";
441 break;
442 case ALLOC_MID:
443 if (strcmp(next->name, label_id->id) == 0) {
444 /* adjust next resource up */
 445 if (is_pmem && !is_reserve)
 446 return n;
447 rc = adjust_resource(next, next->start
448 - allocate, resource_size(next)
449 + allocate);
450 new_res = next;
451 action = "next grow up";
452 } else if (strcmp(res->name, label_id->id) == 0) {
453 action = "grow down";
454 } else
455 action = "allocate";
456 break;
457 case ALLOC_AFTER:
458 if (strcmp(res->name, label_id->id) == 0)
459 action = "grow down";
460 else
461 action = "allocate";
462 break;
463 default:
464 return n;
465 }
466
467 if (strcmp(action, "allocate") == 0) {
468 /* BLK allocate bottom up */
469 if (!is_pmem)
470 free_start += available - allocate;
 471 else if (!is_reserve && free_start != nd_mapping->start)
 472 return n;
473
474 new_res = nvdimm_allocate_dpa(ndd, label_id,
475 free_start, allocate);
476 if (!new_res)
477 rc = -EBUSY;
478 } else if (strcmp(action, "grow down") == 0) {
479 /* adjust current resource down */
480 rc = adjust_resource(res, res->start, resource_size(res)
481 + allocate);
 482 if (rc == 0)
 483 res->flags |= DPA_RESOURCE_ADJUSTED;
 484 }
485
486 if (!new_res)
487 new_res = res;
488
489 nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
490 action, loc, rc);
491
492 if (rc)
493 return n;
494
495 n -= allocate;
496 if (n) {
497 /*
498 * Retry scan with newly inserted resources.
499 * For example, if we did an ALLOC_BEFORE
500 * insertion there may also have been space
501 * available for an ALLOC_AFTER insertion, so we
502 * need to check this same resource again
503 */
504 goto retry;
505 } else
506 return 0;
507 }
508
 509 /*
 510 * If we allocated nothing in the BLK case it may be because we are in
 511 * an initial "pmem-reserve pass". Only do an initial BLK allocation
 512 * when none of the DPA space is reserved.
 513 */
 514 if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
 515 return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
516 return n;
517}
518
 519static int merge_dpa(struct nd_region *nd_region,
520 struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
521{
522 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
523 struct resource *res;
524
525 if (strncmp("pmem", label_id->id, 4) == 0)
526 return 0;
527 retry:
528 for_each_dpa_resource(ndd, res) {
529 int rc;
530 struct resource *next = res->sibling;
531 resource_size_t end = res->start + resource_size(res);
532
533 if (!next || strcmp(res->name, label_id->id) != 0
534 || strcmp(next->name, label_id->id) != 0
535 || end != next->start)
536 continue;
537 end += resource_size(next);
538 nvdimm_free_dpa(ndd, next);
539 rc = adjust_resource(res, res->start, end - res->start);
540 nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
541 if (rc)
542 return rc;
543 res->flags |= DPA_RESOURCE_ADJUSTED;
544 goto retry;
545 }
546
547 return 0;
548}
549
550static int __reserve_free_pmem(struct device *dev, void *data)
551{
552 struct nvdimm *nvdimm = data;
553 struct nd_region *nd_region;
554 struct nd_label_id label_id;
555 int i;
556
557 if (!is_nd_pmem(dev))
558 return 0;
559
560 nd_region = to_nd_region(dev);
561 if (nd_region->ndr_mappings == 0)
562 return 0;
563
564 memset(&label_id, 0, sizeof(label_id));
565 strcat(label_id.id, "pmem-reserve");
566 for (i = 0; i < nd_region->ndr_mappings; i++) {
567 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
568 resource_size_t n, rem = 0;
569
570 if (nd_mapping->nvdimm != nvdimm)
571 continue;
572
573 n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
574 if (n == 0)
575 return 0;
576 rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
577 dev_WARN_ONCE(&nd_region->dev, rem,
578 "pmem reserve underrun: %#llx of %#llx bytes\n",
579 (unsigned long long) n - rem,
580 (unsigned long long) n);
581 return rem ? -ENXIO : 0;
582 }
583
584 return 0;
585}
586
587static void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
588 struct nd_mapping *nd_mapping)
589{
590 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
591 struct resource *res, *_res;
592
593 for_each_dpa_resource_safe(ndd, res, _res)
594 if (strcmp(res->name, "pmem-reserve") == 0)
595 nvdimm_free_dpa(ndd, res);
596}
597
598static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
599 struct nd_mapping *nd_mapping)
600{
601 struct nvdimm *nvdimm = nd_mapping->nvdimm;
602 int rc;
603
604 rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
605 __reserve_free_pmem);
606 if (rc)
607 release_free_pmem(nvdimm_bus, nd_mapping);
608 return rc;
609}
610
 611/**
612 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
613 * @nd_region: the set of dimms to allocate @n more bytes from
614 * @label_id: unique identifier for the namespace consuming this dpa range
615 * @n: number of bytes per-dimm to add to the existing allocation
616 *
617 * Assumes resources are ordered. For BLK regions, first consume
618 * BLK-only available DPA free space, then consume PMEM-aliased DPA
619 * space starting at the highest DPA. For PMEM regions start
620 * allocations from the start of an interleave set and end at the first
621 * BLK allocation or the end of the interleave set, whichever comes
622 * first.
623 */
624static int grow_dpa_allocation(struct nd_region *nd_region,
625 struct nd_label_id *label_id, resource_size_t n)
626{
 627 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
 628 bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
 629 int i;
630
631 for (i = 0; i < nd_region->ndr_mappings; i++) {
632 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
 633 resource_size_t rem = n;
 634 int rc, j;
 635
 636 /*
637 * In the BLK case try once with all unallocated PMEM
638 * reserved, and once without
639 */
640 for (j = is_pmem; j < 2; j++) {
641 bool blk_only = j == 0;
642
643 if (blk_only) {
644 rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
645 if (rc)
646 return rc;
647 }
648 rem = scan_allocate(nd_region, nd_mapping,
649 label_id, rem);
650 if (blk_only)
651 release_free_pmem(nvdimm_bus, nd_mapping);
652
653 /* try again and allow encroachments into PMEM */
654 if (rem == 0)
655 break;
656 }
657
658 dev_WARN_ONCE(&nd_region->dev, rem,
659 "allocation underrun: %#llx of %#llx bytes\n",
660 (unsigned long long) n - rem,
661 (unsigned long long) n);
662 if (rem)
663 return -ENXIO;
664
665 rc = merge_dpa(nd_region, nd_mapping, label_id);
 666 if (rc)
667 return rc;
668 }
669
670 return 0;
671}
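/*
 * Example (hypothetical sizes): growing a blk namespace by 1G on a dimm
 * with only 512M of BLK-only space first allocates that 512M with all
 * free pmem reserved, then drops the reservation and takes the
 * remaining 512M from pmem-aliased capacity at the highest DPA.
 */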
672
673static void nd_namespace_pmem_set_size(struct nd_region *nd_region,
674 struct nd_namespace_pmem *nspm, resource_size_t size)
675{
676 struct resource *res = &nspm->nsio.res;
677
678 res->start = nd_region->ndr_start;
679 res->end = nd_region->ndr_start + size - 1;
680}
681
682static ssize_t __size_store(struct device *dev, unsigned long long val)
683{
684 resource_size_t allocated = 0, available = 0;
685 struct nd_region *nd_region = to_nd_region(dev->parent);
686 struct nd_mapping *nd_mapping;
687 struct nvdimm_drvdata *ndd;
688 struct nd_label_id label_id;
689 u32 flags = 0, remainder;
690 u8 *uuid = NULL;
691 int rc, i;
692
 693 if (dev->driver || to_ndns(dev)->claim)
 694 return -EBUSY;
695
696 if (is_namespace_pmem(dev)) {
697 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
698
699 uuid = nspm->uuid;
700 } else if (is_namespace_blk(dev)) {
 701 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
 702
 703 uuid = nsblk->uuid;
 704 flags = NSLABEL_FLAG_LOCAL;
 705 }
706
707 /*
708 * We need a uuid for the allocation-label and dimm(s) on which
709 * to store the label.
710 */
711 if (!uuid || nd_region->ndr_mappings == 0)
712 return -ENXIO;
713
714 div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
715 if (remainder) {
716 dev_dbg(dev, "%llu is not %dK aligned\n", val,
717 (SZ_4K * nd_region->ndr_mappings) / SZ_1K);
718 return -EINVAL;
719 }
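	/*
	 * Example (hypothetical): with a 4-way interleaved pmem region the
	 * granularity is 4 * SZ_4K = 16K, so a requested size of 1G passes
	 * the check above while 1G + 4K is rejected with -EINVAL.
	 */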
720
721 nd_label_gen_id(&label_id, uuid, flags);
722 for (i = 0; i < nd_region->ndr_mappings; i++) {
723 nd_mapping = &nd_region->mapping[i];
724 ndd = to_ndd(nd_mapping);
725
726 /*
727 * All dimms in an interleave set, or the base dimm for a blk
728 * region, need to be enabled for the size to be changed.
729 */
730 if (!ndd)
731 return -ENXIO;
732
733 allocated += nvdimm_allocated_dpa(ndd, &label_id);
734 }
735 available = nd_region_available_dpa(nd_region);
736
737 if (val > available + allocated)
738 return -ENOSPC;
739
740 if (val == allocated)
741 return 0;
742
743 val = div_u64(val, nd_region->ndr_mappings);
744 allocated = div_u64(allocated, nd_region->ndr_mappings);
745 if (val < allocated)
746 rc = shrink_dpa_allocation(nd_region, &label_id,
747 allocated - val);
748 else
749 rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);
750
751 if (rc)
752 return rc;
753
754 if (is_namespace_pmem(dev)) {
755 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
756
757 nd_namespace_pmem_set_size(nd_region, nspm,
758 val * nd_region->ndr_mappings);
 759 } else if (is_namespace_blk(dev)) {
 760 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
 761
 762 /*
 763 * Try to delete the namespace if we deleted all of its
 764 * allocation, this is not the seed device for the
 765 * region, and it is not actively claimed by a btt
 766 * instance.
 767 */
 768 if (val == 0 && nd_region->ns_seed != dev
 769 && !nsblk->common.claim)
 770 nd_device_unregister(dev, ND_ASYNC);
 771 }
772
773 return rc;
774}
775
776static ssize_t size_store(struct device *dev,
777 struct device_attribute *attr, const char *buf, size_t len)
778{
 779 struct nd_region *nd_region = to_nd_region(dev->parent);
 780 unsigned long long val;
781 u8 **uuid = NULL;
782 int rc;
783
784 rc = kstrtoull(buf, 0, &val);
785 if (rc)
786 return rc;
787
788 device_lock(dev);
789 nvdimm_bus_lock(dev);
790 wait_nvdimm_bus_probe_idle(dev);
791 rc = __size_store(dev, val);
 792 if (rc >= 0)
 793 rc = nd_namespace_label_update(nd_region, dev);
 794
795 if (is_namespace_pmem(dev)) {
796 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
797
798 uuid = &nspm->uuid;
799 } else if (is_namespace_blk(dev)) {
 800 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
 801
 802 uuid = &nsblk->uuid;
 803 }
804
805 if (rc == 0 && val == 0 && uuid) {
806 /* setting size zero == 'delete namespace' */
807 kfree(*uuid);
808 *uuid = NULL;
809 }
810
811 dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0
812 ? "fail" : "success", rc);
813
814 nvdimm_bus_unlock(dev);
815 device_unlock(dev);
816
 817 return rc < 0 ? rc : len;
 818}
 819
 820resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
 821{
 822 struct device *dev = &ndns->dev;
 823
 824 if (is_namespace_pmem(dev)) {
 825 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 826
 827 return resource_size(&nspm->nsio.res);
 828 } else if (is_namespace_blk(dev)) {
 829 return nd_namespace_blk_size(to_nd_namespace_blk(dev));
 830 } else if (is_namespace_io(dev)) {
 831 struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
 832
 833 return resource_size(&nsio->res);
 834 } else
 835 WARN_ONCE(1, "unknown namespace type\n");
 836 return 0;
 837}
 838
 839resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
840{
841 resource_size_t size;
842
843 nvdimm_bus_lock(&ndns->dev);
844 size = __nvdimm_namespace_capacity(ndns);
845 nvdimm_bus_unlock(&ndns->dev);
846
847 return size;
848}
849EXPORT_SYMBOL(nvdimm_namespace_capacity);
850
851static ssize_t size_show(struct device *dev,
852 struct device_attribute *attr, char *buf)
853{
854 return sprintf(buf, "%llu\n", (unsigned long long)
855 nvdimm_namespace_capacity(to_ndns(dev)));
 856}
857static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);
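/*
 * Example usage from userspace (paths are illustrative, assuming a
 * region0 with a namespace0.0 seed device):
 *   echo $((2 * 1024 * 1024 * 1024)) > \
 *       /sys/bus/nd/devices/region0/namespace0.0/size
 * grows the allocation to 2G spread across the region's mappings;
 * writing 0 frees the allocation (and the uuid, see size_store above).
 */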
858
859static ssize_t uuid_show(struct device *dev,
860 struct device_attribute *attr, char *buf)
861{
862 u8 *uuid;
863
864 if (is_namespace_pmem(dev)) {
865 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
866
867 uuid = nspm->uuid;
868 } else if (is_namespace_blk(dev)) {
 869 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
 870
 871 uuid = nsblk->uuid;
 872 } else
873 return -ENXIO;
874
875 if (uuid)
876 return sprintf(buf, "%pUb\n", uuid);
877 return sprintf(buf, "\n");
878}
879
880/**
881 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 882 * @nd_region: parent region so we can update all dimms in the set
883 * @dev: namespace type for generating label_id
884 * @new_uuid: incoming uuid
885 * @old_uuid: reference to the uuid storage location in the namespace object
886 */
887static int namespace_update_uuid(struct nd_region *nd_region,
888 struct device *dev, u8 *new_uuid, u8 **old_uuid)
889{
890 u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
891 struct nd_label_id old_label_id;
892 struct nd_label_id new_label_id;
 893 int i;
 894
 895 if (!nd_is_uuid_unique(dev, new_uuid))
 896 return -EINVAL;
 897
898 if (*old_uuid == NULL)
899 goto out;
900
 901 /*
902 * If we've already written a label with this uuid, then it's
903 * too late to rename because we can't reliably update the uuid
904 * without losing the old namespace. Userspace must delete this
905 * namespace to abandon the old uuid.
906 */
907 for (i = 0; i < nd_region->ndr_mappings; i++) {
908 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
909
910 /*
911 * This check by itself is sufficient because old_uuid
912 * would be NULL above if this uuid did not exist in the
913 * currently written set.
914 *
915 * FIXME: can we delete uuid with zero dpa allocated?
916 */
917 if (nd_mapping->labels)
918 return -EBUSY;
919 }
920
 921 nd_label_gen_id(&old_label_id, *old_uuid, flags);
922 nd_label_gen_id(&new_label_id, new_uuid, flags);
923 for (i = 0; i < nd_region->ndr_mappings; i++) {
924 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
925 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
926 struct resource *res;
927
928 for_each_dpa_resource(ndd, res)
929 if (strcmp(res->name, old_label_id.id) == 0)
930 sprintf((void *) res->name, "%s",
931 new_label_id.id);
932 }
933 kfree(*old_uuid);
934 out:
935 *old_uuid = new_uuid;
936 return 0;
937}
938
939static ssize_t uuid_store(struct device *dev,
940 struct device_attribute *attr, const char *buf, size_t len)
941{
942 struct nd_region *nd_region = to_nd_region(dev->parent);
943 u8 *uuid = NULL;
 944 ssize_t rc = 0;
 945 u8 **ns_uuid;
 946
947 if (is_namespace_pmem(dev)) {
948 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
949
950 ns_uuid = &nspm->uuid;
951 } else if (is_namespace_blk(dev)) {
 952 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
 953
 954 ns_uuid = &nsblk->uuid;
 955 } else
956 return -ENXIO;
957
958 device_lock(dev);
959 nvdimm_bus_lock(dev);
960 wait_nvdimm_bus_probe_idle(dev);
 961 if (to_ndns(dev)->claim)
 962 rc = -EBUSY;
 963 if (rc >= 0)
 964 rc = nd_uuid_store(dev, &uuid, buf, len);
 965 if (rc >= 0)
 966 rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
 967 if (rc >= 0)
 968 rc = nd_namespace_label_update(nd_region, dev);
 969 else
 970 kfree(uuid);
 971 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
 972 rc, buf, buf[len - 1] == '\n' ? "" : "\n");
 973 nvdimm_bus_unlock(dev);
 974 device_unlock(dev);
 975
 976 return rc < 0 ? rc : len;
 977}
978static DEVICE_ATTR_RW(uuid);
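/*
 * Example usage (hypothetical path): a namespace can be (re)named via
 *   uuidgen > /sys/bus/nd/devices/region0/namespace0.0/uuid
 * but only while no labels have been written for the old uuid and the
 * namespace is not claimed; otherwise the write fails with -EBUSY.
 */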
979
980static ssize_t resource_show(struct device *dev,
981 struct device_attribute *attr, char *buf)
982{
983 struct resource *res;
984
985 if (is_namespace_pmem(dev)) {
986 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
987
988 res = &nspm->nsio.res;
989 } else if (is_namespace_io(dev)) {
990 struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
991
992 res = &nsio->res;
993 } else
994 return -ENXIO;
995
996 /* no address to convey if the namespace has no allocation */
997 if (resource_size(res) == 0)
998 return -ENXIO;
999 return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
1000}
1001static DEVICE_ATTR_RO(resource);
1002
 1003static const unsigned long ns_lbasize_supported[] = { 512, 0 };
1004
1005static ssize_t sector_size_show(struct device *dev,
1006 struct device_attribute *attr, char *buf)
1007{
1008 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1009
1010 if (!is_namespace_blk(dev))
1011 return -ENXIO;
1012
1013 return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf);
1014}
1015
1016static ssize_t sector_size_store(struct device *dev,
1017 struct device_attribute *attr, const char *buf, size_t len)
1018{
1019 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
 1020 struct nd_region *nd_region = to_nd_region(dev->parent);
 1021 ssize_t rc = 0;
 1022
1023 if (!is_namespace_blk(dev))
1024 return -ENXIO;
1025
1026 device_lock(dev);
1027 nvdimm_bus_lock(dev);
 1028 if (to_ndns(dev)->claim)
 1029 rc = -EBUSY;
 1030 if (rc >= 0)
 1031 rc = nd_sector_size_store(dev, buf, &nsblk->lbasize,
 1032 ns_lbasize_supported);
 1033 if (rc >= 0)
 1034 rc = nd_namespace_label_update(nd_region, dev);
 1035 dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
 1036 rc, rc < 0 ? "tried" : "wrote", buf,
 1037 buf[len - 1] == '\n' ? "" : "\n");
 1038 nvdimm_bus_unlock(dev);
1039 device_unlock(dev);
1040
1041 return rc ? rc : len;
1042}
1043static DEVICE_ATTR_RW(sector_size);
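/*
 * ns_lbasize_supported above is a zero-terminated list; 512 is the only
 * sector size offered here, so it is the only value a write of this
 * attribute will validate against for a blk namespace.
 */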
1044
 1045static ssize_t dpa_extents_show(struct device *dev,
1046 struct device_attribute *attr, char *buf)
1047{
1048 struct nd_region *nd_region = to_nd_region(dev->parent);
1049 struct nd_label_id label_id;
1050 int count = 0, i;
1051 u8 *uuid = NULL;
1052 u32 flags = 0;
1053
1054 nvdimm_bus_lock(dev);
1055 if (is_namespace_pmem(dev)) {
1056 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1057
1058 uuid = nspm->uuid;
1059 flags = 0;
1060 } else if (is_namespace_blk(dev)) {
1061 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1062
1063 uuid = nsblk->uuid;
1064 flags = NSLABEL_FLAG_LOCAL;
1065 }
1066
1067 if (!uuid)
1068 goto out;
1069
1070 nd_label_gen_id(&label_id, uuid, flags);
1071 for (i = 0; i < nd_region->ndr_mappings; i++) {
1072 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1073 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1074 struct resource *res;
1075
1076 for_each_dpa_resource(ndd, res)
1077 if (strcmp(res->name, label_id.id) == 0)
1078 count++;
1079 }
1080 out:
1081 nvdimm_bus_unlock(dev);
1082
1083 return sprintf(buf, "%d\n", count);
1084}
1085static DEVICE_ATTR_RO(dpa_extents);
1086
 1087static ssize_t holder_show(struct device *dev,
1088 struct device_attribute *attr, char *buf)
1089{
1090 struct nd_namespace_common *ndns = to_ndns(dev);
1091 ssize_t rc;
1092
1093 device_lock(dev);
1094 rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
1095 device_unlock(dev);
1096
1097 return rc;
1098}
1099static DEVICE_ATTR_RO(holder);
1100
1101static ssize_t force_raw_store(struct device *dev,
1102 struct device_attribute *attr, const char *buf, size_t len)
1103{
1104 bool force_raw;
1105 int rc = strtobool(buf, &force_raw);
1106
1107 if (rc)
1108 return rc;
1109
1110 to_ndns(dev)->force_raw = force_raw;
1111 return len;
1112}
1113
1114static ssize_t force_raw_show(struct device *dev,
1115 struct device_attribute *attr, char *buf)
1116{
1117 return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
1118}
1119static DEVICE_ATTR_RW(force_raw);
1120
 1121static struct attribute *nd_namespace_attributes[] = {
 1122 &dev_attr_nstype.attr,
 1123 &dev_attr_size.attr,
 1124 &dev_attr_uuid.attr,
 1125 &dev_attr_holder.attr,
 1126 &dev_attr_resource.attr,
 1127 &dev_attr_alt_name.attr,
 1128 &dev_attr_force_raw.attr,
 1129 &dev_attr_sector_size.attr,
 1130 &dev_attr_dpa_extents.attr,
 1131 NULL,
 1132};
 1133
 1134static umode_t namespace_visible(struct kobject *kobj,
1135 struct attribute *a, int n)
1136{
1137 struct device *dev = container_of(kobj, struct device, kobj);
1138
1139 if (a == &dev_attr_resource.attr) {
1140 if (is_namespace_blk(dev))
1141 return 0;
1142 return a->mode;
1143 }
1144
1145 if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
1146 if (a == &dev_attr_size.attr)
1147 return S_IWUSR | S_IRUGO;
 1148
 1149 if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
 1150 return 0;
 1151
 1152 return a->mode;
 1153 }
 1154
 1155 if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
 1156 || a == &dev_attr_holder.attr
 1157 || a == &dev_attr_force_raw.attr)
 1158 return a->mode;
1159
1160 return 0;
1161}
1162
 1163static struct attribute_group nd_namespace_attribute_group = {
 1164 .attrs = nd_namespace_attributes,
 1165 .is_visible = namespace_visible,
 1166};
1167
1168static const struct attribute_group *nd_namespace_attribute_groups[] = {
1169 &nd_device_attribute_group,
1170 &nd_namespace_attribute_group,
1171 NULL,
1172};
1173
 1174struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
1175{
1176 struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
1177 struct nd_namespace_common *ndns;
1178 resource_size_t size;
1179
1180 if (nd_btt) {
1181 ndns = nd_btt->ndns;
1182 if (!ndns)
1183 return ERR_PTR(-ENODEV);
1184
1185 /*
 1186 * Flush any in-progress probes / removals in the driver
1187 * for the raw personality of this namespace.
1188 */
1189 device_lock(&ndns->dev);
1190 device_unlock(&ndns->dev);
1191 if (ndns->dev.driver) {
1192 dev_dbg(&ndns->dev, "is active, can't bind %s\n",
1193 dev_name(&nd_btt->dev));
1194 return ERR_PTR(-EBUSY);
1195 }
1196 if (dev_WARN_ONCE(&ndns->dev, ndns->claim != &nd_btt->dev,
1197 "host (%s) vs claim (%s) mismatch\n",
1198 dev_name(&nd_btt->dev),
1199 dev_name(ndns->claim)))
1200 return ERR_PTR(-ENXIO);
1201 } else {
1202 ndns = to_ndns(dev);
1203 if (ndns->claim) {
1204 dev_dbg(dev, "claimed by %s, failing probe\n",
1205 dev_name(ndns->claim));
1206
1207 return ERR_PTR(-ENXIO);
1208 }
1209 }
1210
1211 size = nvdimm_namespace_capacity(ndns);
1212 if (size < ND_MIN_NAMESPACE_SIZE) {
1213 dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
1214 &size, ND_MIN_NAMESPACE_SIZE);
1215 return ERR_PTR(-ENODEV);
1216 }
1217
1218 if (is_namespace_pmem(&ndns->dev)) {
1219 struct nd_namespace_pmem *nspm;
1220
1221 nspm = to_nd_namespace_pmem(&ndns->dev);
1222 if (!nspm->uuid) {
1223 dev_dbg(&ndns->dev, "%s: uuid not set\n", __func__);
1224 return ERR_PTR(-ENODEV);
1225 }
1226 } else if (is_namespace_blk(&ndns->dev)) {
1227 return ERR_PTR(-ENODEV); /* TODO */
1228 }
1229
1230 return ndns;
1231}
1232EXPORT_SYMBOL(nvdimm_namespace_common_probe);
1233
 1234static struct device **create_namespace_io(struct nd_region *nd_region)
1235{
1236 struct nd_namespace_io *nsio;
1237 struct device *dev, **devs;
1238 struct resource *res;
1239
1240 nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
1241 if (!nsio)
1242 return NULL;
1243
1244 devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
1245 if (!devs) {
1246 kfree(nsio);
1247 return NULL;
1248 }
1249
 1250 dev = &nsio->common.dev;
 1251 dev->type = &namespace_io_device_type;
1252 dev->parent = &nd_region->dev;
1253 res = &nsio->res;
1254 res->name = dev_name(&nd_region->dev);
1255 res->flags = IORESOURCE_MEM;
1256 res->start = nd_region->ndr_start;
1257 res->end = res->start + nd_region->ndr_size - 1;
1258
1259 devs[0] = dev;
1260 return devs;
1261}
1262
 1263static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
1264 u64 cookie, u16 pos)
1265{
1266 struct nd_namespace_label *found = NULL;
1267 int i;
1268
1269 for (i = 0; i < nd_region->ndr_mappings; i++) {
1270 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1271 struct nd_namespace_label *nd_label;
1272 bool found_uuid = false;
1273 int l;
1274
1275 for_each_label(l, nd_label, nd_mapping->labels) {
1276 u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
1277 u16 position = __le16_to_cpu(nd_label->position);
1278 u16 nlabel = __le16_to_cpu(nd_label->nlabel);
1279
1280 if (isetcookie != cookie)
1281 continue;
1282
1283 if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
1284 continue;
1285
1286 if (found_uuid) {
1287 dev_dbg(to_ndd(nd_mapping)->dev,
1288 "%s duplicate entry for uuid\n",
1289 __func__);
1290 return false;
1291 }
1292 found_uuid = true;
1293 if (nlabel != nd_region->ndr_mappings)
1294 continue;
1295 if (position != pos)
1296 continue;
1297 found = nd_label;
1298 break;
1299 }
1300 if (found)
1301 break;
1302 }
1303 return found != NULL;
1304}
1305
1306static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
1307{
1308 struct nd_namespace_label *select = NULL;
1309 int i;
1310
1311 if (!pmem_id)
1312 return -ENODEV;
1313
1314 for (i = 0; i < nd_region->ndr_mappings; i++) {
1315 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1316 struct nd_namespace_label *nd_label;
1317 u64 hw_start, hw_end, pmem_start, pmem_end;
1318 int l;
1319
1320 for_each_label(l, nd_label, nd_mapping->labels)
1321 if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
1322 break;
1323
1324 if (!nd_label) {
1325 WARN_ON(1);
1326 return -EINVAL;
1327 }
1328
1329 select = nd_label;
1330 /*
1331 * Check that this label is compliant with the dpa
1332 * range published in NFIT
1333 */
1334 hw_start = nd_mapping->start;
1335 hw_end = hw_start + nd_mapping->size;
1336 pmem_start = __le64_to_cpu(select->dpa);
1337 pmem_end = pmem_start + __le64_to_cpu(select->rawsize);
1338 if (pmem_start == hw_start && pmem_end <= hw_end)
1339 /* pass */;
1340 else
1341 return -EINVAL;
1342
1343 nd_mapping->labels[0] = select;
1344 nd_mapping->labels[1] = NULL;
1345 }
1346 return 0;
1347}
1348
1349/**
1350 * find_pmem_label_set - validate interleave set labelling, retrieve label0
1351 * @nd_region: region with mappings to validate
1352 */
1353static int find_pmem_label_set(struct nd_region *nd_region,
1354 struct nd_namespace_pmem *nspm)
1355{
1356 u64 cookie = nd_region_interleave_set_cookie(nd_region);
1357 struct nd_namespace_label *nd_label;
1358 u8 select_id[NSLABEL_UUID_LEN];
1359 resource_size_t size = 0;
1360 u8 *pmem_id = NULL;
1361 int rc = -ENODEV, l;
1362 u16 i;
1363
1364 if (cookie == 0)
1365 return -ENXIO;
1366
1367 /*
1368 * Find a complete set of labels by uuid. By definition we can start
1369 * with any mapping as the reference label
1370 */
1371 for_each_label(l, nd_label, nd_region->mapping[0].labels) {
1372 u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
1373
1374 if (isetcookie != cookie)
1375 continue;
1376
 1377 for (i = 0; i < nd_region->ndr_mappings; i++)
1378 if (!has_uuid_at_pos(nd_region, nd_label->uuid,
1379 cookie, i))
1380 break;
1381 if (i < nd_region->ndr_mappings) {
1382 /*
1383 * Give up if we don't find an instance of a
1384 * uuid at each position (from 0 to
1385 * nd_region->ndr_mappings - 1), or if we find a
1386 * dimm with two instances of the same uuid.
1387 */
1388 rc = -EINVAL;
1389 goto err;
1390 } else if (pmem_id) {
1391 /*
1392 * If there is more than one valid uuid set, we
1393 * need userspace to clean this up.
1394 */
1395 rc = -EBUSY;
1396 goto err;
1397 }
1398 memcpy(select_id, nd_label->uuid, NSLABEL_UUID_LEN);
1399 pmem_id = select_id;
1400 }
1401
1402 /*
1403 * Fix up each mapping's 'labels' to have the validated pmem label for
1404 * that position at labels[0], and NULL at labels[1]. In the process,
 1405 * check that the namespace aligns with the interleave set. We know
1406 * that it does not overlap with any blk namespaces by virtue of
1407 * the dimm being enabled (i.e. nd_label_reserve_dpa()
1408 * succeeded).
1409 */
1410 rc = select_pmem_id(nd_region, pmem_id);
1411 if (rc)
1412 goto err;
1413
1414 /* Calculate total size and populate namespace properties from label0 */
1415 for (i = 0; i < nd_region->ndr_mappings; i++) {
1416 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1417 struct nd_namespace_label *label0 = nd_mapping->labels[0];
1418
1419 size += __le64_to_cpu(label0->rawsize);
1420 if (__le16_to_cpu(label0->position) != 0)
1421 continue;
1422 WARN_ON(nspm->alt_name || nspm->uuid);
1423 nspm->alt_name = kmemdup((void __force *) label0->name,
1424 NSLABEL_NAME_LEN, GFP_KERNEL);
1425 nspm->uuid = kmemdup((void __force *) label0->uuid,
1426 NSLABEL_UUID_LEN, GFP_KERNEL);
1427 }
1428
1429 if (!nspm->alt_name || !nspm->uuid) {
1430 rc = -ENOMEM;
1431 goto err;
1432 }
1433
1434 nd_namespace_pmem_set_size(nd_region, nspm, size);
1435
1436 return 0;
1437 err:
1438 switch (rc) {
1439 case -EINVAL:
1440 dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
1441 break;
1442 case -ENODEV:
1443 dev_dbg(&nd_region->dev, "%s: label not found\n", __func__);
1444 break;
1445 default:
1446 dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n",
1447 __func__, rc);
1448 break;
1449 }
1450 return rc;
1451}
1452
1453static struct device **create_namespace_pmem(struct nd_region *nd_region)
1454{
1455 struct nd_namespace_pmem *nspm;
1456 struct device *dev, **devs;
1457 struct resource *res;
1458 int rc;
1459
1460 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1461 if (!nspm)
1462 return NULL;
1463
 1464 dev = &nspm->nsio.common.dev;
 1465 dev->type = &namespace_pmem_device_type;
1466 dev->parent = &nd_region->dev;
1467 res = &nspm->nsio.res;
1468 res->name = dev_name(&nd_region->dev);
1469 res->flags = IORESOURCE_MEM;
1470 rc = find_pmem_label_set(nd_region, nspm);
1471 if (rc == -ENODEV) {
1472 int i;
1473
1474 /* Pass, try to permit namespace creation... */
1475 for (i = 0; i < nd_region->ndr_mappings; i++) {
1476 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1477
1478 kfree(nd_mapping->labels);
1479 nd_mapping->labels = NULL;
1480 }
1481
1482 /* Publish a zero-sized namespace for userspace to configure. */
1483 nd_namespace_pmem_set_size(nd_region, nspm, 0);
1484
1485 rc = 0;
1486 } else if (rc)
1487 goto err;
1488
1489 devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
1490 if (!devs)
1491 goto err;
1492
1493 devs[0] = dev;
1494 return devs;
1495
1496 err:
 1497 namespace_pmem_release(&nspm->nsio.common.dev);
 1498 return NULL;
 1499}
 1500
 1501struct resource *nsblk_add_resource(struct nd_region *nd_region,
1502 struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
1503 resource_size_t start)
1504{
1505 struct nd_label_id label_id;
1506 struct resource *res;
1507
1508 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
1509 res = krealloc(nsblk->res,
1510 sizeof(void *) * (nsblk->num_resources + 1),
1511 GFP_KERNEL);
1512 if (!res)
1513 return NULL;
1514 nsblk->res = (struct resource **) res;
1515 for_each_dpa_resource(ndd, res)
1516 if (strcmp(res->name, label_id.id) == 0
1517 && res->start == start) {
1518 nsblk->res[nsblk->num_resources++] = res;
1519 return res;
1520 }
1521 return NULL;
1522}
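/*
 * Note: nsblk->res holds pointers into the dimm's DPA resource tree
 * (not copies), grown one entry at a time with krealloc();
 * namespace_blk_release() frees only the pointer array itself.
 */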
1523
1524static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
1525{
1526 struct nd_namespace_blk *nsblk;
1527 struct device *dev;
1528
1529 if (!is_nd_blk(&nd_region->dev))
1530 return NULL;
1531
1532 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1533 if (!nsblk)
1534 return NULL;
1535
 1536 dev = &nsblk->common.dev;
 1537 dev->type = &namespace_blk_device_type;
1538 nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
1539 if (nsblk->id < 0) {
1540 kfree(nsblk);
1541 return NULL;
1542 }
1543 dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
1544 dev->parent = &nd_region->dev;
1545 dev->groups = nd_namespace_attribute_groups;
1546
 1547 return &nsblk->common.dev;
 1548}
1549
1550void nd_region_create_blk_seed(struct nd_region *nd_region)
1551{
1552 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1553 nd_region->ns_seed = nd_namespace_blk_create(nd_region);
1554 /*
1555 * Seed creation failures are not fatal, provisioning is simply
1556 * disabled until memory becomes available
1557 */
1558 if (!nd_region->ns_seed)
1559 dev_err(&nd_region->dev, "failed to create blk namespace\n");
1560 else
1561 nd_device_register(nd_region->ns_seed);
1562}
1563
 1564void nd_region_create_btt_seed(struct nd_region *nd_region)
1565{
1566 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1567 nd_region->btt_seed = nd_btt_create(nd_region);
1568 /*
1569 * Seed creation failures are not fatal, provisioning is simply
1570 * disabled until memory becomes available
1571 */
1572 if (!nd_region->btt_seed)
1573 dev_err(&nd_region->dev, "failed to create btt namespace\n");
1574}
1575
 1576static struct device **create_namespace_blk(struct nd_region *nd_region)
1577{
1578 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1579 struct nd_namespace_label *nd_label;
1580 struct device *dev, **devs = NULL;
1581 struct nd_namespace_blk *nsblk;
1582 struct nvdimm_drvdata *ndd;
1583 int i, l, count = 0;
1584 struct resource *res;
1585
1586 if (nd_region->ndr_mappings == 0)
1587 return NULL;
1588
1589 ndd = to_ndd(nd_mapping);
1590 for_each_label(l, nd_label, nd_mapping->labels) {
1591 u32 flags = __le32_to_cpu(nd_label->flags);
 1592 char name[NSLABEL_NAME_LEN];
1593 struct device **__devs;
1594
1595 if (flags & NSLABEL_FLAG_LOCAL)
1596 /* pass */;
1597 else
1598 continue;
1599
1600 for (i = 0; i < count; i++) {
1601 nsblk = to_nd_namespace_blk(devs[i]);
1602 if (memcmp(nsblk->uuid, nd_label->uuid,
1603 NSLABEL_UUID_LEN) == 0) {
1604 res = nsblk_add_resource(nd_region, ndd, nsblk,
1605 __le64_to_cpu(nd_label->dpa));
1606 if (!res)
1607 goto err;
1608 nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
 1609 dev_name(&nsblk->common.dev));
 1610 break;
1611 }
1612 }
1613 if (i < count)
1614 continue;
1615 __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
1616 if (!__devs)
1617 goto err;
1618 memcpy(__devs, devs, sizeof(dev) * count);
1619 kfree(devs);
1620 devs = __devs;
1621
1622 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1623 if (!nsblk)
1624 goto err;
 1625 dev = &nsblk->common.dev;
 1626 dev->type = &namespace_blk_device_type;
1627 dev->parent = &nd_region->dev;
1628 dev_set_name(dev, "namespace%d.%d", nd_region->id, count);
1629 devs[count++] = dev;
1630 nsblk->id = -1;
1631 nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
1632 nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
1633 GFP_KERNEL);
1634 if (!nsblk->uuid)
1635 goto err;
1636 memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
1637 if (name[0])
1638 nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
1639 GFP_KERNEL);
1640 res = nsblk_add_resource(nd_region, ndd, nsblk,
1641 __le64_to_cpu(nd_label->dpa));
1642 if (!res)
1643 goto err;
1644 nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
 1645 dev_name(&nsblk->common.dev));
 1646 }
1647
1648 dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n",
1649 __func__, count, count == 1 ? "" : "s");
1650
1651 if (count == 0) {
1652 /* Publish a zero-sized namespace for userspace to configure. */
1653 for (i = 0; i < nd_region->ndr_mappings; i++) {
1654 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1655
1656 kfree(nd_mapping->labels);
1657 nd_mapping->labels = NULL;
1658 }
1659
1660 devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
1661 if (!devs)
1662 goto err;
1663 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1664 if (!nsblk)
1665 goto err;
 1666 dev = &nsblk->common.dev;
 1667 dev->type = &namespace_blk_device_type;
1668 dev->parent = &nd_region->dev;
1669 devs[count++] = dev;
1670 }
1671
1672 return devs;
1673
1674err:
1675 for (i = 0; i < count; i++) {
1676 nsblk = to_nd_namespace_blk(devs[i]);
 1677 namespace_blk_release(&nsblk->common.dev);
 1678 }
1679 kfree(devs);
1680 return NULL;
1681}
1682
 1683static int init_active_labels(struct nd_region *nd_region)
1684{
1685 int i;
1686
1687 for (i = 0; i < nd_region->ndr_mappings; i++) {
1688 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1689 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1690 struct nvdimm *nvdimm = nd_mapping->nvdimm;
1691 int count, j;
1692
1693 /*
1694 * If the dimm is disabled then prevent the region from
1695 * being activated if it aliases DPA.
1696 */
1697 if (!ndd) {
1698 if ((nvdimm->flags & NDD_ALIASING) == 0)
1699 return 0;
1700 dev_dbg(&nd_region->dev, "%s: is disabled, failing probe\n",
1701 dev_name(&nd_mapping->nvdimm->dev));
1702 return -ENXIO;
1703 }
1704 nd_mapping->ndd = ndd;
1705 atomic_inc(&nvdimm->busy);
1706 get_ndd(ndd);
1707
1708 count = nd_label_active_count(ndd);
1709 dev_dbg(ndd->dev, "%s: %d\n", __func__, count);
1710 if (!count)
1711 continue;
1712 nd_mapping->labels = kcalloc(count + 1, sizeof(void *),
1713 GFP_KERNEL);
1714 if (!nd_mapping->labels)
1715 return -ENOMEM;
1716 for (j = 0; j < count; j++) {
1717 struct nd_namespace_label *label;
1718
1719 label = nd_label_active(ndd, j);
1720 nd_mapping->labels[j] = label;
1721 }
1722 }
1723
1724 return 0;
1725}
1726
 1727int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
 1728{
 1729 struct device **devs = NULL;
 1730 int i, rc = 0, type;
 1731
 1732 *err = 0;
 1733 nvdimm_bus_lock(&nd_region->dev);
1734 rc = init_active_labels(nd_region);
1735 if (rc) {
1736 nvdimm_bus_unlock(&nd_region->dev);
1737 return rc;
1738 }
1739
1740 type = nd_region_to_nstype(nd_region);
1741 switch (type) {
 1742 case ND_DEVICE_NAMESPACE_IO:
 1743 devs = create_namespace_io(nd_region);
 1744 break;
 1745 case ND_DEVICE_NAMESPACE_PMEM:
 1746 devs = create_namespace_pmem(nd_region);
 1747 break;
 1748 case ND_DEVICE_NAMESPACE_BLK:
 1749 devs = create_namespace_blk(nd_region);
 1750 break;
 1751 default:
 1752 break;
 1753 }
 1754 nvdimm_bus_unlock(&nd_region->dev);
 1755
1756 if (!devs)
1757 return -ENODEV;
1758
 1759 for (i = 0; devs[i]; i++) {
 1760 struct device *dev = devs[i];
 1761 int id;
 1762
 1763 if (type == ND_DEVICE_NAMESPACE_BLK) {
1764 struct nd_namespace_blk *nsblk;
1765
1766 nsblk = to_nd_namespace_blk(dev);
1767 id = ida_simple_get(&nd_region->ns_ida, 0, 0,
1768 GFP_KERNEL);
1769 nsblk->id = id;
1770 } else
1771 id = i;
1772
1773 if (id < 0)
1774 break;
1775 dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
 1776 dev->groups = nd_namespace_attribute_groups;
 1777 nd_device_register(dev);
 1778 }
 1779 if (i)
1780 nd_region->ns_seed = devs[0];
1781
1782 if (devs[i]) {
1783 int j;
1784
1785 for (j = i; devs[j]; j++) {
1786 struct device *dev = devs[j];
1787
1788 device_initialize(dev);
1789 put_device(dev);
1790 }
1791 *err = j - i;
1792 /*
1793 * All of the namespaces we tried to register failed, so
1794 * fail region activation.
1795 */
1796 if (*err == 0)
1797 rc = -ENODEV;
1798 }
 1799 kfree(devs);
 1800
 1801 if (rc == -ENODEV)
 1802 return rc;
 1803
 1804 return i;
1805}
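/*
 * The return value is the number of namespace devices registered for
 * this region; *err is set to the number of candidate devices that
 * could not be named or registered and were released instead.
 */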