dm ioctl: release _hash_lock between devices in remove_all
[linux-2.6-block.git] drivers/md/dm-ioctl.c
1/*
2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include "dm.h"
9
10#include <linux/module.h>
11#include <linux/vmalloc.h>
12#include <linux/miscdevice.h>
13#include <linux/init.h>
14#include <linux/wait.h>
15#include <linux/slab.h>
16#include <linux/dm-ioctl.h>
17#include <linux/hdreg.h>
18#include <linux/compat.h>
19
20#include <asm/uaccess.h>
21
22#define DM_MSG_PREFIX "ioctl"
23#define DM_DRIVER_EMAIL "dm-devel@redhat.com"
24
25/*-----------------------------------------------------------------
26 * The ioctl interface needs to be able to look up devices by
27 * name or uuid.
28 *---------------------------------------------------------------*/
29struct hash_cell {
30 struct list_head name_list;
31 struct list_head uuid_list;
32
33 char *name;
34 char *uuid;
35 struct mapped_device *md;
36 struct dm_table *new_map;
37};
38
39struct vers_iter {
40 size_t param_size;
41 struct dm_target_versions *vers, *old_vers;
42 char *end;
43 uint32_t flags;
44};
45
46
47#define NUM_BUCKETS 64
48#define MASK_BUCKETS (NUM_BUCKETS - 1)
49static struct list_head _name_buckets[NUM_BUCKETS];
50static struct list_head _uuid_buckets[NUM_BUCKETS];
51
52static void dm_hash_remove_all(int keep_open_devices);
53
54/*
55 * Guards access to both hash tables.
56 */
57static DECLARE_RWSEM(_hash_lock);
58
59/*
60 * Protects use of mdptr to obtain hash cell name and uuid from mapped device.
61 */
62static DEFINE_MUTEX(dm_hash_cells_mutex);
63
64static void init_buckets(struct list_head *buckets)
65{
66 unsigned int i;
67
68 for (i = 0; i < NUM_BUCKETS; i++)
69 INIT_LIST_HEAD(buckets + i);
70}
71
72static int dm_hash_init(void)
73{
74 init_buckets(_name_buckets);
75 init_buckets(_uuid_buckets);
76 return 0;
77}
78
79static void dm_hash_exit(void)
80{
81 dm_hash_remove_all(0);
82}
83
84/*-----------------------------------------------------------------
85 * Hash function:
86 * We're not really concerned with the str hash function being
87 * fast since it's only used by the ioctl interface.
88 *---------------------------------------------------------------*/
89static unsigned int hash_str(const char *str)
90{
91 const unsigned int hash_mult = 2654435387U;
92 unsigned int h = 0;
93
94 while (*str)
95 h = (h + (unsigned int) *str++) * hash_mult;
96
97 return h & MASK_BUCKETS;
98}
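/*
 * NUM_BUCKETS is a power of two, so masking the accumulated hash with
 * MASK_BUCKETS selects a bucket.  The same function indexes both
 * _name_buckets and _uuid_buckets; the two tables are simply keyed on
 * different strings for the same hash_cell.
 */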
99
100/*-----------------------------------------------------------------
101 * Code for looking up a device by name
102 *---------------------------------------------------------------*/
103static struct hash_cell *__get_name_cell(const char *str)
104{
105 struct hash_cell *hc;
106 unsigned int h = hash_str(str);
107
108 list_for_each_entry (hc, _name_buckets + h, name_list)
109 if (!strcmp(hc->name, str)) {
110 dm_get(hc->md);
111 return hc;
112 }
113
114 return NULL;
115}
116
117static struct hash_cell *__get_uuid_cell(const char *str)
118{
119 struct hash_cell *hc;
120 unsigned int h = hash_str(str);
121
122 list_for_each_entry (hc, _uuid_buckets + h, uuid_list)
123 if (!strcmp(hc->uuid, str)) {
124 dm_get(hc->md);
125 return hc;
126 }
127
128 return NULL;
129}
130
131/*-----------------------------------------------------------------
132 * Inserting, removing and renaming a device.
133 *---------------------------------------------------------------*/
134static struct hash_cell *alloc_cell(const char *name, const char *uuid,
135 struct mapped_device *md)
136{
137 struct hash_cell *hc;
138
139 hc = kmalloc(sizeof(*hc), GFP_KERNEL);
140 if (!hc)
141 return NULL;
142
143 hc->name = kstrdup(name, GFP_KERNEL);
144 if (!hc->name) {
145 kfree(hc);
146 return NULL;
147 }
148
149 if (!uuid)
150 hc->uuid = NULL;
151
152 else {
153 hc->uuid = kstrdup(uuid, GFP_KERNEL);
154 if (!hc->uuid) {
155 kfree(hc->name);
156 kfree(hc);
157 return NULL;
158 }
159 }
160
161 INIT_LIST_HEAD(&hc->name_list);
162 INIT_LIST_HEAD(&hc->uuid_list);
163 hc->md = md;
164 hc->new_map = NULL;
165 return hc;
166}
167
168static void free_cell(struct hash_cell *hc)
169{
170 if (hc) {
171 kfree(hc->name);
172 kfree(hc->uuid);
173 kfree(hc);
174 }
175}
176
177/*
178 * The kdev_t and uuid of a device can never change once it is
179 * initially inserted.
180 */
181static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md)
182{
183 struct hash_cell *cell, *hc;
184
185 /*
186 * Allocate the new cells.
187 */
188 cell = alloc_cell(name, uuid, md);
189 if (!cell)
190 return -ENOMEM;
191
192 /*
193 * Insert the cell into both hash tables.
194 */
195 down_write(&_hash_lock);
196 hc = __get_name_cell(name);
197 if (hc) {
198 dm_put(hc->md);
199 goto bad;
200 }
201
202 list_add(&cell->name_list, _name_buckets + hash_str(name));
203
204 if (uuid) {
205 hc = __get_uuid_cell(uuid);
206 if (hc) {
207 list_del(&cell->name_list);
208 dm_put(hc->md);
209 goto bad;
210 }
211 list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
212 }
213 dm_get(md);
214 mutex_lock(&dm_hash_cells_mutex);
215 dm_set_mdptr(md, cell);
216 mutex_unlock(&dm_hash_cells_mutex);
217 up_write(&_hash_lock);
218
219 return 0;
220
221 bad:
222 up_write(&_hash_lock);
223 free_cell(cell);
224 return -EBUSY;
225}
226
227static void __hash_remove(struct hash_cell *hc)
228{
229 struct dm_table *table;
230
231 /* remove from the dev hash */
232 list_del(&hc->uuid_list);
233 list_del(&hc->name_list);
234 mutex_lock(&dm_hash_cells_mutex);
235 dm_set_mdptr(hc->md, NULL);
236 mutex_unlock(&dm_hash_cells_mutex);
237
238 table = dm_get_live_table(hc->md);
239 if (table) {
240 dm_table_event(table);
241 dm_table_put(table);
242 }
243
244 if (hc->new_map)
245 dm_table_destroy(hc->new_map);
246 dm_put(hc->md);
247 free_cell(hc);
248}
249
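/*
 * Remove every cell from both hash tables.  _hash_lock is released
 * after each device is removed and the bucket scan restarts from the
 * top (the retry label below): the final dm_put() may tear down the
 * device and its tables, which is best not done while holding
 * _hash_lock, and removing one device can make stacked devices that
 * were previously busy removable on a later pass.
 */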
250static void dm_hash_remove_all(int keep_open_devices)
251{
252 int i, dev_skipped;
253 struct hash_cell *hc;
254 struct mapped_device *md;
255
256retry:
257 dev_skipped = 0;
258
259 down_write(&_hash_lock);
260
261 for (i = 0; i < NUM_BUCKETS; i++) {
262 list_for_each_entry(hc, _name_buckets + i, name_list) {
263 md = hc->md;
264 dm_get(md);
265
266 if (keep_open_devices && dm_lock_for_deletion(md)) {
267 dm_put(md);
268 dev_skipped++;
269 continue;
270 }
271
272 __hash_remove(hc);
273
274 up_write(&_hash_lock);
275
276 dm_put(md);
277
278 /*
279 * Some mapped devices may be using other mapped
280 * devices, so repeat until we make no further
281 * progress. If a new mapped device is created
282 * here it will also get removed.
283 */
284 goto retry;
285 }
286 }
287
288 up_write(&_hash_lock);
289
290 if (dev_skipped)
291 DMWARN("remove_all left %d open device(s)", dev_skipped);
292}
293
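/*
 * Rename a device under _hash_lock: the cell is re-hashed into the
 * bucket for the new name, a table event and a KOBJ_CHANGE uevent are
 * generated, and the mapped_device is returned with a reference held
 * (callers drop it with dm_put()).  Failures are reported via ERR_PTR().
 */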
294static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
295 const char *new)
296{
297 char *new_name, *old_name;
298 struct hash_cell *hc;
299 struct dm_table *table;
300 struct mapped_device *md;
301
302 /*
303 * duplicate new.
304 */
305 new_name = kstrdup(new, GFP_KERNEL);
306 if (!new_name)
307 return ERR_PTR(-ENOMEM);
308
309 down_write(&_hash_lock);
310
311 /*
312 * Is new free ?
313 */
314 hc = __get_name_cell(new);
315 if (hc) {
316 DMWARN("asked to rename to an already-existing name %s -> %s",
317 param->name, new);
318 dm_put(hc->md);
319 up_write(&_hash_lock);
320 kfree(new_name);
321 return ERR_PTR(-EBUSY);
322 }
323
324 /*
325 * Is there such a device as 'old' ?
326 */
327 hc = __get_name_cell(param->name);
328 if (!hc) {
329 DMWARN("asked to rename a non-existent device %s -> %s",
330 param->name, new);
331 up_write(&_hash_lock);
332 kfree(new_name);
333 return ERR_PTR(-ENXIO);
334 }
335
336 /*
337 * rename and move the name cell.
338 */
339 list_del(&hc->name_list);
340 old_name = hc->name;
341 mutex_lock(&dm_hash_cells_mutex);
342 hc->name = new_name;
343 mutex_unlock(&dm_hash_cells_mutex);
344 list_add(&hc->name_list, _name_buckets + hash_str(new_name));
345
346 /*
347 * Wake up any dm event waiters.
348 */
349 table = dm_get_live_table(hc->md);
350 if (table) {
351 dm_table_event(table);
352 dm_table_put(table);
353 }
354
355 if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr))
356 param->flags |= DM_UEVENT_GENERATED_FLAG;
357
358 md = hc->md;
359 up_write(&_hash_lock);
360 kfree(old_name);
361
362 return md;
363}
364
365/*-----------------------------------------------------------------
366 * Implementation of the ioctl commands
367 *---------------------------------------------------------------*/
368/*
369 * All the ioctl commands get dispatched to functions with this
370 * prototype.
371 */
372typedef int (*ioctl_fn)(struct dm_ioctl *param, size_t param_size);
373
374static int remove_all(struct dm_ioctl *param, size_t param_size)
375{
376 dm_hash_remove_all(1);
377 param->data_size = 0;
378 return 0;
379}
380
381/*
382 * Round up the ptr to an 8-byte boundary.
383 */
384#define ALIGN_MASK 7
385static inline void *align_ptr(void *ptr)
386{
387 return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK);
388}
389
390/*
391 * Retrieves the data payload buffer from an already allocated
392 * struct dm_ioctl.
393 */
394static void *get_result_buffer(struct dm_ioctl *param, size_t param_size,
395 size_t *len)
396{
397 param->data_start = align_ptr(param + 1) - (void *) param;
398
399 if (param->data_start < param_size)
400 *len = param_size - param->data_start;
401 else
402 *len = 0;
403
404 return ((void *) param) + param->data_start;
405}
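/*
 * Results share the buffer that carried the ioctl parameters: the fixed
 * struct dm_ioctl header sits at offset 0, param->data_start is set to
 * the 8-byte-aligned offset just past it, and up to *len bytes of
 * command-specific payload follow.  Callers later set param->data_size
 * to the number of bytes actually used.
 */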
406
407static int list_devices(struct dm_ioctl *param, size_t param_size)
408{
409 unsigned int i;
410 struct hash_cell *hc;
411 size_t len, needed = 0;
412 struct gendisk *disk;
413 struct dm_name_list *nl, *old_nl = NULL;
414
415 down_write(&_hash_lock);
416
417 /*
418 * Loop through all the devices working out how much
419 * space we need.
420 */
421 for (i = 0; i < NUM_BUCKETS; i++) {
422 list_for_each_entry (hc, _name_buckets + i, name_list) {
423 needed += sizeof(struct dm_name_list);
424 needed += strlen(hc->name) + 1;
425 needed += ALIGN_MASK;
426 }
427 }
428
429 /*
430 * Grab our output buffer.
431 */
432 nl = get_result_buffer(param, param_size, &len);
433 if (len < needed) {
434 param->flags |= DM_BUFFER_FULL_FLAG;
435 goto out;
436 }
437 param->data_size = param->data_start + needed;
438
439 nl->dev = 0; /* Flags no data */
440
441 /*
442 * Now loop through filling out the names.
443 */
444 for (i = 0; i < NUM_BUCKETS; i++) {
445 list_for_each_entry (hc, _name_buckets + i, name_list) {
446 if (old_nl)
447 old_nl->next = (uint32_t) ((void *) nl -
448 (void *) old_nl);
449 disk = dm_disk(hc->md);
450 nl->dev = huge_encode_dev(disk_devt(disk));
451 nl->next = 0;
452 strcpy(nl->name, hc->name);
453
454 old_nl = nl;
455 nl = align_ptr(((void *) ++nl) + strlen(hc->name) + 1);
456 }
457 }
458
459 out:
460 up_write(&_hash_lock);
461 return 0;
462}
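/*
 * The payload built above is a chain of struct dm_name_list entries,
 * each followed by its NUL-terminated name and aligned to 8 bytes;
 * ->next holds the byte offset to the following entry (0 terminates)
 * and ->dev == 0 on the first entry means no devices were listed.
 * A consumer might walk it roughly like this (illustrative sketch,
 * not taken from any particular userspace tool):
 *
 *	struct dm_name_list *nl = (void *) param + param->data_start;
 *
 *	if (nl->dev)
 *		for (;;) {
 *			use(nl->dev, nl->name);
 *			if (!nl->next)
 *				break;
 *			nl = (void *) nl + nl->next;
 *		}
 */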
463
464static void list_version_get_needed(struct target_type *tt, void *needed_param)
465{
466 size_t *needed = needed_param;
467
468 *needed += sizeof(struct dm_target_versions);
469 *needed += strlen(tt->name);
470 *needed += ALIGN_MASK;
471}
472
473static void list_version_get_info(struct target_type *tt, void *param)
474{
475 struct vers_iter *info = param;
476
477 /* Check space - it might have changed since the first iteration */
478 if ((char *)info->vers + sizeof(tt->version) + strlen(tt->name) + 1 >
479 info->end) {
480
481 info->flags = DM_BUFFER_FULL_FLAG;
482 return;
483 }
484
485 if (info->old_vers)
486 info->old_vers->next = (uint32_t) ((void *)info->vers -
487 (void *)info->old_vers);
488 info->vers->version[0] = tt->version[0];
489 info->vers->version[1] = tt->version[1];
490 info->vers->version[2] = tt->version[2];
491 info->vers->next = 0;
492 strcpy(info->vers->name, tt->name);
493
494 info->old_vers = info->vers;
495 info->vers = align_ptr(((void *) ++info->vers) + strlen(tt->name) + 1);
496}
497
498static int list_versions(struct dm_ioctl *param, size_t param_size)
499{
500 size_t len, needed = 0;
501 struct dm_target_versions *vers;
502 struct vers_iter iter_info;
503
504 /*
505 * Loop through all the devices working out how much
506 * space we need.
507 */
508 dm_target_iterate(list_version_get_needed, &needed);
509
510 /*
511 * Grab our output buffer.
512 */
513 vers = get_result_buffer(param, param_size, &len);
514 if (len < needed) {
515 param->flags |= DM_BUFFER_FULL_FLAG;
516 goto out;
517 }
518 param->data_size = param->data_start + needed;
519
520 iter_info.param_size = param_size;
521 iter_info.old_vers = NULL;
522 iter_info.vers = vers;
523 iter_info.flags = 0;
524 iter_info.end = (char *)vers+len;
525
526 /*
527 * Now loop through filling out the names & versions.
528 */
529 dm_target_iterate(list_version_get_info, &iter_info);
530 param->flags |= iter_info.flags;
531
532 out:
533 return 0;
534}
535
536static int check_name(const char *name)
537{
538 if (strchr(name, '/')) {
539 DMWARN("invalid device name");
540 return -EINVAL;
541 }
542
543 return 0;
544}
545
546/*
547 * On successful return, the caller must not attempt to acquire
548 * _hash_lock without first calling dm_table_put, because dm_table_destroy
549 * waits for this dm_table_put and could be called under this lock.
550 */
551static struct dm_table *dm_get_inactive_table(struct mapped_device *md)
552{
553 struct hash_cell *hc;
554 struct dm_table *table = NULL;
555
556 down_read(&_hash_lock);
557 hc = dm_get_mdptr(md);
558 if (!hc || hc->md != md) {
559 DMWARN("device has been removed from the dev hash table.");
560 goto out;
561 }
562
563 table = hc->new_map;
564 if (table)
565 dm_table_get(table);
566
567out:
568 up_read(&_hash_lock);
569
570 return table;
571}
572
573static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md,
574 struct dm_ioctl *param)
575{
576 return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ?
577 dm_get_inactive_table(md) : dm_get_live_table(md);
578}
579
580/*
581 * Fills in a dm_ioctl structure, ready for sending back to
582 * userland.
583 */
584static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
585{
586 struct gendisk *disk = dm_disk(md);
587 struct dm_table *table;
588
589 param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG |
590 DM_ACTIVE_PRESENT_FLAG);
591
592 if (dm_suspended_md(md))
593 param->flags |= DM_SUSPEND_FLAG;
594
595 param->dev = huge_encode_dev(disk_devt(disk));
596
597 /*
598 * Yes, this will be out of date by the time it gets back
599 * to userland, but it is still very useful for
600 * debugging.
601 */
602 param->open_count = dm_open_count(md);
603
604 param->event_nr = dm_get_event_nr(md);
605 param->target_count = 0;
606
607 table = dm_get_live_table(md);
608 if (table) {
609 if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) {
610 if (get_disk_ro(disk))
611 param->flags |= DM_READONLY_FLAG;
612 param->target_count = dm_table_get_num_targets(table);
613 }
614 dm_table_put(table);
615
616 param->flags |= DM_ACTIVE_PRESENT_FLAG;
617 }
618
619 if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) {
620 table = dm_get_inactive_table(md);
621 if (table) {
622 if (!(dm_table_get_mode(table) & FMODE_WRITE))
623 param->flags |= DM_READONLY_FLAG;
624 param->target_count = dm_table_get_num_targets(table);
625 dm_table_put(table);
626 }
627 }
628}
629
630static int dev_create(struct dm_ioctl *param, size_t param_size)
631{
632 int r, m = DM_ANY_MINOR;
633 struct mapped_device *md;
634
635 r = check_name(param->name);
636 if (r)
637 return r;
638
639 if (param->flags & DM_PERSISTENT_DEV_FLAG)
640 m = MINOR(huge_decode_dev(param->dev));
641
642 r = dm_create(m, &md);
643 if (r)
644 return r;
645
646 r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
647 if (r)
648 goto out;
649
650 param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
651
652 __dev_status(md, param);
653
654out:
655 dm_put(md);
656
657 return r;
658}
659
660/*
661 * Always use UUID for lookups if it's present, otherwise use name or dev.
662 */
663static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
664{
665 struct mapped_device *md;
666 void *mdptr = NULL;
667
668 if (*param->uuid)
669 return __get_uuid_cell(param->uuid);
670
671 if (*param->name)
672 return __get_name_cell(param->name);
673
674 md = dm_get_md(huge_decode_dev(param->dev));
675 if (!md)
676 goto out;
677
678 mdptr = dm_get_mdptr(md);
679 if (!mdptr)
680 dm_put(md);
681
682out:
683 return mdptr;
684}
685
686static struct mapped_device *find_device(struct dm_ioctl *param)
687{
688 struct hash_cell *hc;
689 struct mapped_device *md = NULL;
690
691 down_read(&_hash_lock);
692 hc = __find_device_hash_cell(param);
693 if (hc) {
694 md = hc->md;
695
696 /*
697 * Sneakily write in both the name and the uuid
698 * while we have the cell.
699 */
700 strlcpy(param->name, hc->name, sizeof(param->name));
701 if (hc->uuid)
702 strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
703 else
704 param->uuid[0] = '\0';
705
706 if (hc->new_map)
707 param->flags |= DM_INACTIVE_PRESENT_FLAG;
708 else
709 param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
710 }
711 up_read(&_hash_lock);
712
713 return md;
714}
715
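/*
 * find_device() returns the mapped_device with a reference held (taken
 * in __get_uuid_cell/__get_name_cell or dm_get_md), so each caller
 * below pairs it with dm_put().
 */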
716static int dev_remove(struct dm_ioctl *param, size_t param_size)
717{
718 struct hash_cell *hc;
719 struct mapped_device *md;
720 int r;
721
722 down_write(&_hash_lock);
723 hc = __find_device_hash_cell(param);
724
725 if (!hc) {
726 DMWARN("device doesn't appear to be in the dev hash table.");
727 up_write(&_hash_lock);
728 return -ENXIO;
729 }
730
731 md = hc->md;
732
733 /*
734 * Ensure the device is not open and nothing further can open it.
735 */
736 r = dm_lock_for_deletion(md);
737 if (r) {
738 DMWARN("unable to remove open device %s", hc->name);
739 up_write(&_hash_lock);
740 dm_put(md);
741 return r;
742 }
743
744 __hash_remove(hc);
745 up_write(&_hash_lock);
746
747 if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
748 param->flags |= DM_UEVENT_GENERATED_FLAG;
749
750 dm_put(md);
751 return 0;
752}
753
754/*
755 * Check a string doesn't overrun the chunk of
756 * memory we copied from userland.
757 */
758static int invalid_str(char *str, void *end)
759{
760 while ((void *) str < end)
761 if (!*str++)
762 return 0;
763
764 return -EINVAL;
765}
766
767static int dev_rename(struct dm_ioctl *param, size_t param_size)
768{
769 int r;
770 char *new_name = (char *) param + param->data_start;
771 struct mapped_device *md;
772
773 if (new_name < param->data ||
774 invalid_str(new_name, (void *) param + param_size) ||
775 strlen(new_name) > DM_NAME_LEN - 1) {
776 DMWARN("Invalid new logical volume name supplied.");
777 return -EINVAL;
778 }
779
780 r = check_name(new_name);
781 if (r)
782 return r;
783
784 md = dm_hash_rename(param, new_name);
785 if (IS_ERR(md))
786 return PTR_ERR(md);
787
788 __dev_status(md, param);
789 dm_put(md);
790
791 return 0;
792}
793
794static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
795{
796 int r = -EINVAL, x;
797 struct mapped_device *md;
798 struct hd_geometry geometry;
799 unsigned long indata[4];
800 char *geostr = (char *) param + param->data_start;
801
802 md = find_device(param);
803 if (!md)
804 return -ENXIO;
805
806 if (geostr < param->data ||
807 invalid_str(geostr, (void *) param + param_size)) {
808 DMWARN("Invalid geometry supplied.");
809 goto out;
810 }
811
812 x = sscanf(geostr, "%lu %lu %lu %lu", indata,
813 indata + 1, indata + 2, indata + 3);
814
815 if (x != 4) {
816 DMWARN("Unable to interpret geometry settings.");
817 goto out;
818 }
819
820 if (indata[0] > 65535 || indata[1] > 255 ||
821 indata[2] > 255 || indata[3] > ULONG_MAX) {
822 DMWARN("Geometry exceeds range limits.");
823 goto out;
824 }
825
826 geometry.cylinders = indata[0];
827 geometry.heads = indata[1];
828 geometry.sectors = indata[2];
829 geometry.start = indata[3];
830
831 r = dm_set_geometry(md, &geometry);
832
833 param->data_size = 0;
834
835out:
836 dm_put(md);
837 return r;
838}
839
840static int do_suspend(struct dm_ioctl *param)
841{
842 int r = 0;
843 unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
844 struct mapped_device *md;
845
846 md = find_device(param);
847 if (!md)
848 return -ENXIO;
849
850 if (param->flags & DM_SKIP_LOCKFS_FLAG)
851 suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
852 if (param->flags & DM_NOFLUSH_FLAG)
853 suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
854
855 if (!dm_suspended_md(md)) {
856 r = dm_suspend(md, suspend_flags);
857 if (r)
858 goto out;
859 }
860
861 __dev_status(md, param);
862
863out:
864 dm_put(md);
865
866 return r;
867}
868
869static int do_resume(struct dm_ioctl *param)
870{
871 int r = 0;
872 unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
873 struct hash_cell *hc;
874 struct mapped_device *md;
875 struct dm_table *new_map, *old_map = NULL;
876
877 down_write(&_hash_lock);
878
879 hc = __find_device_hash_cell(param);
880 if (!hc) {
881 DMWARN("device doesn't appear to be in the dev hash table.");
882 up_write(&_hash_lock);
883 return -ENXIO;
884 }
885
886 md = hc->md;
887
888 new_map = hc->new_map;
889 hc->new_map = NULL;
890 param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
891
892 up_write(&_hash_lock);
893
894 /* Do we need to load a new map ? */
895 if (new_map) {
896 /* Suspend if it isn't already suspended */
897 if (param->flags & DM_SKIP_LOCKFS_FLAG)
898 suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
899 if (param->flags & DM_NOFLUSH_FLAG)
900 suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
901 if (!dm_suspended_md(md))
902 dm_suspend(md, suspend_flags);
903
904 old_map = dm_swap_table(md, new_map);
905 if (IS_ERR(old_map)) {
906 dm_table_destroy(new_map);
907 dm_put(md);
908 return PTR_ERR(old_map);
909 }
910
911 if (dm_table_get_mode(new_map) & FMODE_WRITE)
912 set_disk_ro(dm_disk(md), 0);
913 else
914 set_disk_ro(dm_disk(md), 1);
915 }
916
917 if (dm_suspended_md(md)) {
918 r = dm_resume(md);
919 if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
920 param->flags |= DM_UEVENT_GENERATED_FLAG;
921 }
922
923 if (old_map)
924 dm_table_destroy(old_map);
925
926 if (!r)
927 __dev_status(md, param);
928
929 dm_put(md);
930 return r;
931}
932
933/*
934 * Set or unset the suspension state of a device.
935 * If the device already is in the requested state we just return its status.
936 */
937static int dev_suspend(struct dm_ioctl *param, size_t param_size)
938{
939 if (param->flags & DM_SUSPEND_FLAG)
940 return do_suspend(param);
941
942 return do_resume(param);
943}
944
945/*
946 * Copies device info back to user space, used by
947 * the create and info ioctls.
948 */
949static int dev_status(struct dm_ioctl *param, size_t param_size)
950{
951 struct mapped_device *md;
952
953 md = find_device(param);
954 if (!md)
955 return -ENXIO;
956
957 __dev_status(md, param);
958 dm_put(md);
959
960 return 0;
961}
962
963/*
964 * Build up the status struct for each target
965 */
966static void retrieve_status(struct dm_table *table,
967 struct dm_ioctl *param, size_t param_size)
968{
969 unsigned int i, num_targets;
970 struct dm_target_spec *spec;
971 char *outbuf, *outptr;
972 status_type_t type;
973 size_t remaining, len, used = 0;
974
975 outptr = outbuf = get_result_buffer(param, param_size, &len);
976
977 if (param->flags & DM_STATUS_TABLE_FLAG)
978 type = STATUSTYPE_TABLE;
979 else
980 type = STATUSTYPE_INFO;
981
982 /* Get all the target info */
983 num_targets = dm_table_get_num_targets(table);
984 for (i = 0; i < num_targets; i++) {
985 struct dm_target *ti = dm_table_get_target(table, i);
986
987 remaining = len - (outptr - outbuf);
988 if (remaining <= sizeof(struct dm_target_spec)) {
989 param->flags |= DM_BUFFER_FULL_FLAG;
990 break;
991 }
992
993 spec = (struct dm_target_spec *) outptr;
994
995 spec->status = 0;
996 spec->sector_start = ti->begin;
997 spec->length = ti->len;
998 strncpy(spec->target_type, ti->type->name,
999 sizeof(spec->target_type));
1000
1001 outptr += sizeof(struct dm_target_spec);
1002 remaining = len - (outptr - outbuf);
1003 if (remaining <= 0) {
1004 param->flags |= DM_BUFFER_FULL_FLAG;
1005 break;
1006 }
1007
1008 /* Get the status/table string from the target driver */
1009 if (ti->type->status) {
1010 if (ti->type->status(ti, type, outptr, remaining)) {
1011 param->flags |= DM_BUFFER_FULL_FLAG;
1012 break;
1013 }
1014 } else
1015 outptr[0] = '\0';
1016
1017 outptr += strlen(outptr) + 1;
1018 used = param->data_start + (outptr - outbuf);
1019
1020 outptr = align_ptr(outptr);
1021 spec->next = outptr - outbuf;
1022 }
1023
1024 if (used)
1025 param->data_size = used;
1026
1027 param->target_count = num_targets;
1028}
1029
1030/*
1031 * Wait for a device to report an event
1032 */
1033static int dev_wait(struct dm_ioctl *param, size_t param_size)
1034{
1035 int r = 0;
1036 struct mapped_device *md;
1037 struct dm_table *table;
1038
1039 md = find_device(param);
1040 if (!md)
1041 return -ENXIO;
1042
1043 /*
1044 * Wait for a notification event
1045 */
1046 if (dm_wait_event(md, param->event_nr)) {
1047 r = -ERESTARTSYS;
1048 goto out;
1049 }
1050
1051 /*
1052 * The userland program is going to want to know what
1053 * changed to trigger the event, so we may as well tell
1054 * him and save an ioctl.
1055 */
1056 __dev_status(md, param);
1057
1058 table = dm_get_live_or_inactive_table(md, param);
1059 if (table) {
1060 retrieve_status(table, param, param_size);
1061 dm_table_put(table);
1062 }
1063
1064out:
1065 dm_put(md);
1066
1067 return r;
1068}
1069
1070static inline fmode_t get_mode(struct dm_ioctl *param)
1071{
1072 fmode_t mode = FMODE_READ | FMODE_WRITE;
1073
1074 if (param->flags & DM_READONLY_FLAG)
1075 mode = FMODE_READ;
1076
1077 return mode;
1078}
1079
1080static int next_target(struct dm_target_spec *last, uint32_t next, void *end,
1081 struct dm_target_spec **spec, char **target_params)
1082{
1083 *spec = (struct dm_target_spec *) ((unsigned char *) last + next);
1084 *target_params = (char *) (*spec + 1);
1085
1086 if (*spec < (last + 1))
1087 return -EINVAL;
1088
1089 return invalid_str(*target_params, end);
1090}
1091
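/*
 * A table load supplies param->target_count structs dm_target_spec
 * starting at param->data_start.  Each spec is followed by its
 * NUL-terminated parameter string, and ->next is the byte offset from
 * the start of that spec to the next one.  As an illustration only, a
 * single linear target covering sectors 0..1023 of /dev/sdb would use
 * sector_start = 0, length = 1024, target_type = "linear" and the
 * parameter string "/dev/sdb 0" (destination device and offset).
 */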
1092static int populate_table(struct dm_table *table,
1093 struct dm_ioctl *param, size_t param_size)
1094{
1095 int r;
1096 unsigned int i = 0;
1097 struct dm_target_spec *spec = (struct dm_target_spec *) param;
1098 uint32_t next = param->data_start;
1099 void *end = (void *) param + param_size;
1100 char *target_params;
1101
1102 if (!param->target_count) {
1103 DMWARN("populate_table: no targets specified");
1104 return -EINVAL;
1105 }
1106
1107 for (i = 0; i < param->target_count; i++) {
1108
1109 r = next_target(spec, next, end, &spec, &target_params);
1110 if (r) {
1111 DMWARN("unable to find target");
1112 return r;
1113 }
1114
1115 r = dm_table_add_target(table, spec->target_type,
1116 (sector_t) spec->sector_start,
1117 (sector_t) spec->length,
1118 target_params);
1119 if (r) {
1120 DMWARN("error adding target to table");
1121 return r;
1122 }
1123
1124 next = spec->next;
1125 }
1126
1127 r = dm_table_set_type(table);
1128 if (r) {
1129 DMWARN("unable to set table type");
1130 return r;
1131 }
1132
1133 return dm_table_complete(table);
1134}
1135
1136static int table_prealloc_integrity(struct dm_table *t,
1137 struct mapped_device *md)
1138{
1139 struct list_head *devices = dm_table_get_devices(t);
1140 struct dm_dev_internal *dd;
1141
1142 list_for_each_entry(dd, devices, list)
1143 if (bdev_get_integrity(dd->dm_dev.bdev))
1144 return blk_integrity_register(dm_disk(md), NULL);
1145
1146 return 0;
1147}
1148
1149static int table_load(struct dm_ioctl *param, size_t param_size)
1150{
1151 int r;
1152 struct hash_cell *hc;
1153 struct dm_table *t;
1154 struct mapped_device *md;
1155
1156 md = find_device(param);
1157 if (!md)
1158 return -ENXIO;
1159
1160 r = dm_table_create(&t, get_mode(param), param->target_count, md);
1161 if (r)
1162 goto out;
1163
1164 r = populate_table(t, param, param_size);
1165 if (r) {
1166 dm_table_destroy(t);
1167 goto out;
1168 }
1169
1170 r = table_prealloc_integrity(t, md);
1171 if (r) {
1172 DMERR("%s: could not register integrity profile.",
1173 dm_device_name(md));
1174 dm_table_destroy(t);
1175 goto out;
1176 }
1177
1178 r = dm_table_alloc_md_mempools(t);
1179 if (r) {
1180 DMWARN("unable to allocate mempools for this table");
1181 dm_table_destroy(t);
1182 goto out;
1183 }
1184
1185 down_write(&_hash_lock);
1186 hc = dm_get_mdptr(md);
1187 if (!hc || hc->md != md) {
1188 DMWARN("device has been removed from the dev hash table.");
1189 dm_table_destroy(t);
1190 up_write(&_hash_lock);
1191 r = -ENXIO;
1192 goto out;
1193 }
1194
1195 if (hc->new_map)
1196 dm_table_destroy(hc->new_map);
1197 hc->new_map = t;
1198 up_write(&_hash_lock);
1199
1200 param->flags |= DM_INACTIVE_PRESENT_FLAG;
1201 __dev_status(md, param);
1202
1203out:
1204 dm_put(md);
1205
1206 return r;
1207}
1208
1209static int table_clear(struct dm_ioctl *param, size_t param_size)
1210{
1211 struct hash_cell *hc;
1212 struct mapped_device *md;
1213
1214 down_write(&_hash_lock);
1215
1216 hc = __find_device_hash_cell(param);
1217 if (!hc) {
1218 DMWARN("device doesn't appear to be in the dev hash table.");
1219 up_write(&_hash_lock);
1220 return -ENXIO;
1221 }
1222
1223 if (hc->new_map) {
1224 dm_table_destroy(hc->new_map);
1225 hc->new_map = NULL;
1226 }
1227
1228 param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
1229
1230 __dev_status(hc->md, param);
1231 md = hc->md;
1232 up_write(&_hash_lock);
1233 dm_put(md);
1234
1235 return 0;
1236}
1237
1238/*
1239 * Retrieves a list of devices used by a particular dm device.
1240 */
1241static void retrieve_deps(struct dm_table *table,
1242 struct dm_ioctl *param, size_t param_size)
1243{
1244 unsigned int count = 0;
1245 struct list_head *tmp;
1246 size_t len, needed;
1247 struct dm_dev_internal *dd;
1248 struct dm_target_deps *deps;
1249
1250 deps = get_result_buffer(param, param_size, &len);
1251
1252 /*
1253 * Count the devices.
1254 */
1255 list_for_each (tmp, dm_table_get_devices(table))
1256 count++;
1257
1258 /*
1259 * Check we have enough space.
1260 */
1261 needed = sizeof(*deps) + (sizeof(*deps->dev) * count);
1262 if (len < needed) {
1263 param->flags |= DM_BUFFER_FULL_FLAG;
1264 return;
1265 }
1266
1267 /*
1268 * Fill in the devices.
1269 */
1270 deps->count = count;
1271 count = 0;
1272 list_for_each_entry (dd, dm_table_get_devices(table), list)
1273 deps->dev[count++] = huge_encode_dev(dd->dm_dev.bdev->bd_dev);
1274
1275 param->data_size = param->data_start + needed;
1276}
1277
1278static int table_deps(struct dm_ioctl *param, size_t param_size)
1279{
1280 struct mapped_device *md;
1281 struct dm_table *table;
1282
1283 md = find_device(param);
1284 if (!md)
1285 return -ENXIO;
1286
1287 __dev_status(md, param);
1288
1289 table = dm_get_live_or_inactive_table(md, param);
1290 if (table) {
1291 retrieve_deps(table, param, param_size);
1292 dm_table_put(table);
1293 }
1294
1295 dm_put(md);
1296
1297 return 0;
1298}
1299
1300/*
1301 * Return the status of a device as a text string for each
1302 * target.
1303 */
1304static int table_status(struct dm_ioctl *param, size_t param_size)
1305{
1306 struct mapped_device *md;
1307 struct dm_table *table;
1308
1309 md = find_device(param);
1310 if (!md)
1311 return -ENXIO;
1312
1313 __dev_status(md, param);
1314
1315 table = dm_get_live_or_inactive_table(md, param);
1316 if (table) {
1317 retrieve_status(table, param, param_size);
1318 dm_table_put(table);
1319 }
1320
1321 dm_put(md);
1322
1323 return 0;
1324}
1325
1326/*
1327 * Pass a message to the target that's at the supplied device offset.
1328 */
1329static int target_message(struct dm_ioctl *param, size_t param_size)
1330{
1331 int r, argc;
1332 char **argv;
1333 struct mapped_device *md;
1334 struct dm_table *table;
1335 struct dm_target *ti;
1336 struct dm_target_msg *tmsg = (void *) param + param->data_start;
1337
1338 md = find_device(param);
1339 if (!md)
1340 return -ENXIO;
1341
1342 if (tmsg < (struct dm_target_msg *) param->data ||
1343 invalid_str(tmsg->message, (void *) param + param_size)) {
1344 DMWARN("Invalid target message parameters.");
1345 r = -EINVAL;
1346 goto out;
1347 }
1348
1349 r = dm_split_args(&argc, &argv, tmsg->message);
1350 if (r) {
1351 DMWARN("Failed to split target message parameters");
1352 goto out;
1353 }
1354
1355 table = dm_get_live_table(md);
1356 if (!table)
1357 goto out_argv;
1358
1359 if (dm_deleting_md(md)) {
1360 r = -ENXIO;
1361 goto out_table;
1362 }
1363
1364 ti = dm_table_find_target(table, tmsg->sector);
1365 if (!dm_target_is_valid(ti)) {
1366 DMWARN("Target message sector outside device.");
1367 r = -EINVAL;
1368 } else if (ti->type->message)
1369 r = ti->type->message(ti, argc, argv);
1370 else {
1371 DMWARN("Target type does not support messages");
1372 r = -EINVAL;
1373 }
1374
1375 out_table:
1376 dm_table_put(table);
1377 out_argv:
1378 kfree(argv);
1379 out:
1380 param->data_size = 0;
1381 dm_put(md);
1382 return r;
1383}
1384
1385/*-----------------------------------------------------------------
1386 * Implementation of open/close/ioctl on the special char
1387 * device.
1388 *---------------------------------------------------------------*/
1389static ioctl_fn lookup_ioctl(unsigned int cmd)
1390{
1391 static struct {
1392 int cmd;
1393 ioctl_fn fn;
1394 } _ioctls[] = {
1395 {DM_VERSION_CMD, NULL}, /* version is dealt with elsewhere */
1396 {DM_REMOVE_ALL_CMD, remove_all},
1397 {DM_LIST_DEVICES_CMD, list_devices},
1398
1399 {DM_DEV_CREATE_CMD, dev_create},
1400 {DM_DEV_REMOVE_CMD, dev_remove},
1401 {DM_DEV_RENAME_CMD, dev_rename},
1402 {DM_DEV_SUSPEND_CMD, dev_suspend},
1403 {DM_DEV_STATUS_CMD, dev_status},
1404 {DM_DEV_WAIT_CMD, dev_wait},
1405
1406 {DM_TABLE_LOAD_CMD, table_load},
1407 {DM_TABLE_CLEAR_CMD, table_clear},
1408 {DM_TABLE_DEPS_CMD, table_deps},
1409 {DM_TABLE_STATUS_CMD, table_status},
1410
1411 {DM_LIST_VERSIONS_CMD, list_versions},
1412
1413 {DM_TARGET_MSG_CMD, target_message},
1414 {DM_DEV_SET_GEOMETRY_CMD, dev_set_geometry}
1415 };
1416
1417 return (cmd >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[cmd].fn;
1418}
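/*
 * Userspace reaches these handlers through ioctls on
 * /dev/mapper/control.  A rough sketch of querying a device by name
 * (illustrative only; real tools use libdevmapper and handle errors
 * and buffer resizing):
 *
 *	struct dm_ioctl dmi = {
 *		.version = { DM_VERSION_MAJOR, DM_VERSION_MINOR, 0 },
 *		.data_size = sizeof(dmi),
 *		.data_start = sizeof(dmi),
 *	};
 *
 *	strncpy(dmi.name, "mydev", sizeof(dmi.name) - 1);
 *	fd = open("/dev/mapper/control", O_RDWR);
 *	ioctl(fd, DM_DEV_STATUS, &dmi);
 *
 * check_version() below insists on a matching major version, and
 * copy_params() requires data_size to cover at least the fixed header.
 */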
1419
1420/*
1421 * As well as checking the version compatibility this always
1422 * copies the kernel interface version out.
1423 */
1424static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
1425{
1426 uint32_t version[3];
1427 int r = 0;
1428
1429 if (copy_from_user(version, user->version, sizeof(version)))
1430 return -EFAULT;
1431
1432 if ((DM_VERSION_MAJOR != version[0]) ||
1433 (DM_VERSION_MINOR < version[1])) {
1434 DMWARN("ioctl interface mismatch: "
1435 "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
1436 DM_VERSION_MAJOR, DM_VERSION_MINOR,
1437 DM_VERSION_PATCHLEVEL,
1438 version[0], version[1], version[2], cmd);
1439 r = -EINVAL;
1440 }
1441
1442 /*
1443 * Fill in the kernel version.
1444 */
1445 version[0] = DM_VERSION_MAJOR;
1446 version[1] = DM_VERSION_MINOR;
1447 version[2] = DM_VERSION_PATCHLEVEL;
1448 if (copy_to_user(user->version, version, sizeof(version)))
1449 return -EFAULT;
1450
1451 return r;
1452}
1453
1454static void free_params(struct dm_ioctl *param)
1455{
1456 vfree(param);
1457}
1458
1459static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
1460{
1461 struct dm_ioctl tmp, *dmi;
1462
1463 if (copy_from_user(&tmp, user, sizeof(tmp) - sizeof(tmp.data)))
1464 return -EFAULT;
1465
1466 if (tmp.data_size < (sizeof(tmp) - sizeof(tmp.data)))
1467 return -EINVAL;
1468
1469 dmi = vmalloc(tmp.data_size);
1470 if (!dmi)
1471 return -ENOMEM;
1472
1473 if (copy_from_user(dmi, user, tmp.data_size)) {
1474 vfree(dmi);
1475 return -EFAULT;
1476 }
1477
1478 *param = dmi;
1479 return 0;
1480}
1481
1482static int validate_params(uint cmd, struct dm_ioctl *param)
1483{
1484 /* Always clear this flag */
1485 param->flags &= ~DM_BUFFER_FULL_FLAG;
1486 param->flags &= ~DM_UEVENT_GENERATED_FLAG;
1487
1488 /* Ignores parameters */
1489 if (cmd == DM_REMOVE_ALL_CMD ||
1490 cmd == DM_LIST_DEVICES_CMD ||
1491 cmd == DM_LIST_VERSIONS_CMD)
1492 return 0;
1493
1494 if ((cmd == DM_DEV_CREATE_CMD)) {
1495 if (!*param->name) {
1496 DMWARN("name not supplied when creating device");
1497 return -EINVAL;
1498 }
1499 } else if ((*param->uuid && *param->name)) {
1500 DMWARN("only supply one of name or uuid, cmd(%u)", cmd);
1501 return -EINVAL;
1502 }
1503
1504 /* Ensure strings are terminated */
1505 param->name[DM_NAME_LEN - 1] = '\0';
1506 param->uuid[DM_UUID_LEN - 1] = '\0';
1507
1508 return 0;
1509}
1510
1511static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
1512{
1513 int r = 0;
1514 unsigned int cmd;
1515 struct dm_ioctl *uninitialized_var(param);
1516 ioctl_fn fn = NULL;
1517 size_t param_size;
1518
1519 /* only root can play with this */
1520 if (!capable(CAP_SYS_ADMIN))
1521 return -EACCES;
1522
1523 if (_IOC_TYPE(command) != DM_IOCTL)
1524 return -ENOTTY;
1525
1526 cmd = _IOC_NR(command);
1527
1528 /*
1529 * Check the interface version passed in. This also
1530 * writes out the kernel's interface version.
1531 */
1532 r = check_version(cmd, user);
1533 if (r)
1534 return r;
1535
1536 /*
1537 * Nothing more to do for the version command.
1538 */
1539 if (cmd == DM_VERSION_CMD)
1540 return 0;
1541
1542 fn = lookup_ioctl(cmd);
1543 if (!fn) {
1544 DMWARN("dm_ctl_ioctl: unknown command 0x%x", command);
1545 return -ENOTTY;
1546 }
1547
1548 /*
1549 * Trying to avoid low memory issues when a device is
1550 * suspended.
1551 */
1552 current->flags |= PF_MEMALLOC;
1553
1554 /*
1555 * Copy the parameters into kernel space.
1556 */
1557 r = copy_params(user, &param);
1558
1559 current->flags &= ~PF_MEMALLOC;
1560
1561 if (r)
1562 return r;
1563
1564 r = validate_params(cmd, param);
1565 if (r)
1566 goto out;
1567
1568 param_size = param->data_size;
1569 param->data_size = sizeof(*param);
1570 r = fn(param, param_size);
1571
1572 /*
1573 * Copy the results back to userland.
1574 */
1575 if (!r && copy_to_user(user, param, param->data_size))
1576 r = -EFAULT;
1577
1578 out:
1579 free_params(param);
1da177e4
LT
1580 return r;
1581}
1582
1583static long dm_ctl_ioctl(struct file *file, uint command, ulong u)
1584{
1585 return (long)ctl_ioctl(command, (struct dm_ioctl __user *)u);
1586}
1587
1588#ifdef CONFIG_COMPAT
1589static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u)
1590{
1591 return (long)dm_ctl_ioctl(file, command, (ulong) compat_ptr(u));
1592}
1593#else
1594#define dm_compat_ctl_ioctl NULL
1595#endif
1596
1597static const struct file_operations _ctl_fops = {
1598 .unlocked_ioctl = dm_ctl_ioctl,
1599 .compat_ioctl = dm_compat_ctl_ioctl,
1600 .owner = THIS_MODULE,
1601};
1602
1603static struct miscdevice _dm_misc = {
1604 .minor = MISC_DYNAMIC_MINOR,
1605 .name = DM_NAME,
1606 .nodename = "mapper/control",
1607 .fops = &_ctl_fops
1608};
1609
1610/*
1611 * Create misc character device and link to DM_DIR/control.
1612 */
1613int __init dm_interface_init(void)
1614{
1615 int r;
1616
1617 r = dm_hash_init();
1618 if (r)
1619 return r;
1620
1621 r = misc_register(&_dm_misc);
1622 if (r) {
1623 DMERR("misc_register failed for control device");
1624 dm_hash_exit();
1625 return r;
1626 }
1627
1628 DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR,
1629 DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA,
1630 DM_DRIVER_EMAIL);
1631 return 0;
1632}
1633
1634void dm_interface_exit(void)
1635{
1636 if (misc_deregister(&_dm_misc) < 0)
1637 DMERR("misc_deregister failed for control device");
1638
1639 dm_hash_exit();
1640}
1641
1642/**
1643 * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers
1644 * @md: Pointer to mapped_device
1645 * @name: Buffer (size DM_NAME_LEN) for name
1646 * @uuid: Buffer (size DM_UUID_LEN) for uuid or empty string if uuid not defined
1647 */
1648int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
1649{
1650 int r = 0;
1651 struct hash_cell *hc;
1652
1653 if (!md)
1654 return -ENXIO;
1655
1656 mutex_lock(&dm_hash_cells_mutex);
1657 hc = dm_get_mdptr(md);
1658 if (!hc || hc->md != md) {
1659 r = -ENXIO;
1660 goto out;
1661 }
1662
1663 if (name)
1664 strcpy(name, hc->name);
1665 if (uuid)
1666 strcpy(uuid, hc->uuid ? : "");
96a1f7db
MA
1667
1668out:
1669 mutex_unlock(&dm_hash_cells_mutex);
1670
1671 return r;
1672}