Fixup ioctls
[binject.git] / main.c
/*
 * TODO
 *
 * - Proper ioctls
 * - Get rid of device list?
 */
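/*
 * binject: inject bios directly into a block device from user space.
 *
 * A misc control device ("binject-ctl") accepts add/del ioctls that bind
 * an open block device fd to a per-target character device. Commands are
 * submitted by write()ing struct b_user_cmd entries to that device, and
 * completions are collected with read() (or poll() for readiness).
 */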
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/miscdevice.h>
#include <linux/cdev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "kcompat.h"
#include "binject.h"

static LIST_HEAD(b_dev_list);
static DEFINE_SPINLOCK(b_dev_lock);
static DEFINE_IDR(b_minor_idr);
static struct kmem_cache *b_slab;
static struct class *b_class;
static int b_major;

#define B_MAX_DEVS      64

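/*
 * Completed commands are first queued on a per-cpu list from the bio
 * end_io path, then spliced over to the owning b_dev when a reader asks
 * for them. This keeps the completion hot path free of shared locks.
 */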
struct b_dev_cpu {
        spinlock_t lock;
        struct list_head done_list;
};

struct b_dev {
        struct list_head device_list;
        struct list_head reaped_done;
        spinlock_t done_lock;
        atomic_t in_flight;
        wait_queue_head_t wq_done;
        struct block_device *bdev;
        atomic_t ref;
        struct file *file;
        struct device *dev;
        int minor;
        struct b_dev_cpu __percpu *cpu_queue;
        struct rcu_head rcu_free;
};

struct b_cmd {
        struct list_head list;
        struct b_dev *bd;
        struct bio *bio;
        struct b_user_cmd cmd;
        u64 issue_time;
};

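/*
 * Maps the user-visible B_FLAG_* bit positions to the corresponding
 * B_REQ_* bio flags applied when the bio is built.
 */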
static const unsigned long uc_flag_map[__B_FLAG_NR] = {
        B_REQ_SYNC,
        B_REQ_UNPLUG,
        B_REQ_NOIDLE,
        B_REQ_HARDBARRIER,
        B_REQ_META,
        B_REQ_RAHEAD,
        B_REQ_FAILFAST_DEV,
        B_REQ_FAILFAST_TRANSPORT,
        B_REQ_FAILFAST_DRIVER
};

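/*
 * Per command type properties: whether the command transfers data, in
 * which direction, whether the data side is backed by the zero page (or
 * throwaway pages) rather than a user mapping, and which bio rw flags
 * the type implies.
 */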
struct uc_map {
        int type;
        unsigned int data_transfer : 1;
        unsigned int todevice : 1;
        unsigned int map_zero : 1;
        unsigned long rw_flags;
};

static const struct uc_map uc_map[B_TYPE_NR] = {
        {
                .type           = B_TYPE_READ,
                .data_transfer  = 1,
                .todevice       = 0,
                .map_zero       = 0,
        },
        {
                .type           = B_TYPE_WRITE,
                .data_transfer  = 1,
                .todevice       = 1,
                .map_zero       = 0,
                .rw_flags       = B_REQ_WRITE,
        },
        {
                .type           = B_TYPE_DISCARD,
                .data_transfer  = 0,
                .todevice       = 0,
                .map_zero       = 0,
                .rw_flags       = B_REQ_DISCARD | B_REQ_WRITE,
        },
        {
                .type           = B_TYPE_READVOID,
                .data_transfer  = 1,
                .todevice       = 0,
                .map_zero       = 1,
        },
        {
                .type           = B_TYPE_WRITEZERO,
                .data_transfer  = 1,
                .todevice       = 1,
                .map_zero       = 1,
                .rw_flags       = B_REQ_WRITE,
        },
        {
                .type           = B_TYPE_READBARRIER,
                .data_transfer  = 1,
                .todevice       = 0,
                .map_zero       = 0,
                .rw_flags       = B_REQ_HARDBARRIER,
        },
        {
                .type           = B_TYPE_WRITEBARRIER,
                .data_transfer  = 1,
                .todevice       = 1,
                .map_zero       = 0,
                .rw_flags       = B_REQ_HARDBARRIER | B_REQ_FLUSH | B_REQ_WRITE,
        }
};

static void b_dev_complete_commands(struct b_dev *bd);

static void b_dev_remove_lookup(struct b_dev *bd)
{
        if (!list_empty(&bd->device_list)) {
                list_del_init(&bd->device_list);
                idr_remove(&b_minor_idr, bd->minor);
        }
}

static void bd_rcu_free(struct rcu_head *head)
{
        struct b_dev *bd = container_of(head, struct b_dev, rcu_free);

        free_percpu(bd->cpu_queue);
        kfree(bd);
}

static void b_dev_put(struct b_dev *bd)
{
        if (!atomic_dec_and_test(&bd->ref))
                return;

        spin_lock(&b_dev_lock);
        b_dev_remove_lookup(bd);
        spin_unlock(&b_dev_lock);

        b_dev_complete_commands(bd);

        device_destroy(b_class, MKDEV(b_major, bd->minor));
        fput(bd->file);
        module_put(THIS_MODULE);

        call_rcu(&bd->rcu_free, bd_rcu_free);
}

static struct b_cmd *get_free_command(struct b_dev *bd)
{
        struct b_cmd *bc;

        bc = kmem_cache_alloc(b_slab, GFP_KERNEL);
        if (bc) {
                bc->bd = bd;
                return bc;
        }

        return ERR_PTR(-ENOMEM);
}

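/*
 * Completion reaping happens in two stages: first hand out anything that
 * was already spliced onto bd->reaped_done, then pull the per-cpu done
 * lists over under done_lock and retry. Returns NULL if nothing has
 * completed yet.
 */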
static struct b_cmd *get_completed_command(struct b_dev *bd)
{
        struct b_cmd *bc = NULL;
        int cpu, spliced = 0;

        spin_lock(&bd->done_lock);
        if (!list_empty(&bd->reaped_done)) {
ret_one:
                bc = list_entry(bd->reaped_done.next, struct b_cmd, list);
                list_del_init(&bc->list);
        }
        spin_unlock(&bd->done_lock);

        if (bc)
                return bc;

        spin_lock(&bd->done_lock);
        for_each_possible_cpu(cpu) {
                struct b_dev_cpu *bdc = per_cpu_ptr(bd->cpu_queue, cpu);

                spin_lock_irq(&bdc->lock);
                if (!list_empty(&bdc->done_list)) {
                        list_splice_init(&bdc->done_list, &bd->reaped_done);
                        spliced++;
                }
                spin_unlock_irq(&bdc->lock);
        }

        /*
         * done_lock is still held here; jumping back to ret_one dequeues
         * one of the freshly spliced entries and drops the lock.
         */
        if (spliced)
                goto ret_one;

        spin_unlock(&bd->done_lock);
        return NULL;
}

static int bd_pending_done(struct b_dev *bd)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct b_dev_cpu *bdc = per_cpu_ptr(bd->cpu_queue, cpu);

                if (!list_empty_careful(&bdc->done_list))
                        return 1;
        }

        return 0;
}

static struct b_cmd *get_done_command(struct b_dev *bd, int block)
{
        struct b_cmd *bc;
        int ret;

        do {
                bc = get_completed_command(bd);
                if (bc)
                        break;

                if (!block)
                        break;

                ret = wait_event_interruptible(bd->wq_done, bd_pending_done(bd));
                if (ret) {
                        bc = ERR_PTR(-ERESTARTSYS);
                        break;
                }
        } while (1);

        return bc;
}

static void bc_put_bio_pages(struct bio *bio)
{
        struct bio_vec *bv;
        unsigned int i;

        __bio_for_each_segment(bv, bio, i, 0) {
                if (bv->bv_page != ZERO_PAGE(0))
                        __free_page(bv->bv_page);
        }
}

static void complete_and_free_bio(struct b_cmd *bc)
{
        if (bc->bio) {
                const struct uc_map *ucm = &uc_map[bc->cmd.type];

                if (ucm->data_transfer) {
                        if (!ucm->map_zero)
                                bio_unmap_user(bc->bio);
                        else
                                bc_put_bio_pages(bc->bio);
                }
                bio_put(bc->bio);
                bc->bio = NULL;
        }
}

static void b_dev_complete_commands(struct b_dev *bd)
{
        struct b_cmd *bc;

        wait_event(bd->wq_done, !atomic_read(&bd->in_flight));

        while ((bc = get_completed_command(bd)) != NULL)
                complete_and_free_bio(bc);
}

static int b_dev_validate_command(struct b_user_cmd *buc)
{
        int i;

        if (!binject_buc_check_magic(buc))
                return -EINVAL;

        for (i = 0; i < B_TYPE_NR; i++) {
                const struct uc_map *ucm = &uc_map[i];

                if (ucm->type != buc->type)
                        continue;
                if (ucm->data_transfer && !buc->len)
                        break;

                return 0;
        }

        return -EINVAL;
}

static void b_cmd_endio(struct bio *bio, int error)
{
        struct b_cmd *bc = bio->bi_private;
        struct b_dev *bd = bc->bd;
        struct b_dev_cpu *bdc;
        unsigned long flags;
        u64 now;

        now = ktime_to_ns(ktime_get());
        bc->cmd.nsec = now - bc->issue_time;
        bc->cmd.error = error;

        local_irq_save(flags);
        bdc = per_cpu_ptr(bd->cpu_queue, smp_processor_id());

        spin_lock(&bdc->lock);
        list_add_tail(&bc->list, &bdc->done_list);
        spin_unlock_irqrestore(&bdc->lock, flags);

        atomic_dec(&bd->in_flight);

        smp_mb();
        if (waitqueue_active(&bd->wq_done))
                wake_up(&bd->wq_done);
}

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
static int bio_cmd_endio(struct bio *bio, unsigned int bytes, int err)
{
        if (bio->bi_size)
                return 1;

        b_cmd_endio(bio, err);
        return 0;
}
#else
static void bio_cmd_endio(struct bio *bio, int err)
{
        b_cmd_endio(bio, err);
}
#endif

#define len_to_pages(len)      (((len) + PAGE_SIZE - 1) / PAGE_SIZE)

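/*
 * For READVOID/WRITEZERO commands the bio is not mapped to user memory.
 * Writes point every segment at ZERO_PAGE(0); reads get freshly
 * allocated throwaway pages that are freed again on completion.
 */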
static int zero_map_bio(struct request_queue *q, struct bio *bio,
                        const struct uc_map *ucm, unsigned int len)
{
        unsigned int i, nr_pages, this_len, ret;
        struct page *page;
        int err;

        nr_pages = len_to_pages(len);
        for (i = 0; i < nr_pages; i++) {
                if (ucm->todevice)
                        page = ZERO_PAGE(0);
                else {
                        page = alloc_page(GFP_KERNEL);
                        if (!page) {
                                err = -ENOMEM;
                                goto oom;
                        }
                }

                this_len = PAGE_SIZE;
                if (this_len > len)
                        this_len = len;

                ret = bio_add_pc_page(q, bio, page, this_len, 0);
                if (ret < this_len) {
                        /* the page was never added to the bio, free it here */
                        if (!ucm->todevice)
                                __free_page(page);
                        err = -E2BIG;
                        goto oom;
                }
                len -= this_len;
        }
        return 0;
oom:
        bc_put_bio_pages(bio);
        return err;
}

static void map_uc_to_bio_flags(struct bio *bio, struct b_user_cmd *uc)
{
        unsigned int i;

        /*
         * Only bits we know about are mapped; anything at or beyond
         * __B_FLAG_NR is ignored.
         */
        for (i = 0; i < __B_FLAG_NR; i++) {
                unsigned long mask;

                if (uc->flags & (1UL << i))
                        bio->bi_rw |= uc_flag_map[i];

                mask = ~((1UL << i) - 1);
                if (!(mask & uc->flags))
                        break;
        }
}

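/*
 * Build the bio for a user command: plain reads/writes are mapped
 * straight from the user buffer with binject_map_bio(), zero/void types
 * get their pages from zero_map_bio(), and discard only needs a size.
 */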
static struct bio *map_uc_to_bio(struct b_dev *bd, struct b_user_cmd *uc)
{
        struct request_queue *q = bdev_get_queue(bd->bdev);
        const struct uc_map *ucm = &uc_map[uc->type];
        struct bio *bio;

        if (ucm->data_transfer && !ucm->map_zero) {
                bio = binject_map_bio(q, bd->bdev, uc->buf, uc->len,
                                        !ucm->todevice, GFP_KERNEL);
        } else {
                bio = bio_alloc(GFP_KERNEL, len_to_pages(uc->len));
                if (bio) {
                        bio->bi_bdev = bd->bdev;
                        if (ucm->map_zero && uc->len) {
                                int err;

                                err = zero_map_bio(q, bio, ucm, uc->len);
                                if (err) {
                                        bio_put(bio);
                                        bio = ERR_PTR(err);
                                }
                        } else
                                bio->bi_size = uc->len;
                }
        }

        if (!bio)
                bio = ERR_PTR(-ENOMEM);
        else if (!IS_ERR(bio)) {
                map_uc_to_bio_flags(bio, uc);
                bio->bi_sector = uc->offset / binject_get_bs(q);
                bio->bi_rw |= ucm->rw_flags;
        }

        return bio;
}

static int b_dev_add_command(struct b_dev *bd, struct b_cmd *bc)
{
        struct b_user_cmd *uc = &bc->cmd;
        struct bio *bio;

        bio = map_uc_to_bio(bd, uc);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio_get(bio);
        bc->bio = bio;

        bio->bi_end_io = bio_cmd_endio;
        bio->bi_private = bc;

        bc->issue_time = ktime_to_ns(ktime_get());

        atomic_inc(&bd->in_flight);
        submit_bio(bio->bi_rw, bio);
        return 0;
}

static void b_dev_free_command(struct b_dev *bd, struct b_cmd *bc)
{
        BUG_ON(!list_empty(&bc->list));
        kmem_cache_free(b_slab, bc);
}

/*
 * We are always writable, as we have an infinite queue depth
 */
static unsigned int b_dev_poll(struct file *file, poll_table *wait)
{
        struct b_dev *bd = file->private_data;
        unsigned int mask = POLLOUT;

        poll_wait(file, &bd->wq_done, wait);

        if (bd_pending_done(bd))
                mask |= POLLIN | POLLRDNORM;

        return mask;
}

static int b_dev_release(struct inode *inode, struct file *file)
{
        struct b_dev *bd = file->private_data;

        b_dev_put(bd);
        return 0;
}

static struct b_dev *b_dev_lookup(int minor)
{
        struct b_dev *bd;

        rcu_read_lock();

        bd = idr_find(&b_minor_idr, minor);
        if (bd && !atomic_inc_not_zero(&bd->ref))
                bd = NULL;

        rcu_read_unlock();
        return bd;
}

static int b_dev_open(struct inode *inode, struct file *file)
{
        struct b_dev *bd;

        bd = b_dev_lookup(iminor(inode));
        if (!bd)
                return -ENODEV;

        file->private_data = bd;
        return 0;
}

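/*
 * write() submits commands: the buffer must be a whole number of
 * struct b_user_cmd entries, each of which is validated, turned into a
 * bio and submitted. Returns the number of bytes consumed, or an error
 * if nothing was submitted.
 */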
static ssize_t b_dev_write(struct file *file, const char __user *buf,
                           size_t count, loff_t *ppos)
{
        struct b_dev *bd = file->private_data;
        struct b_cmd *bc = NULL;
        unsigned int total;
        ssize_t done = 0;
        int err = 0;

        if (count % sizeof(struct b_user_cmd))
                return -EINVAL;

        total = count / sizeof(struct b_user_cmd);
        while (total) {
                bc = get_free_command(bd);
                if (IS_ERR(bc)) {
                        err = PTR_ERR(bc);
                        bc = NULL;
                        break;
                }

                if (copy_from_user(&bc->cmd, buf, sizeof(struct b_user_cmd))) {
                        err = -EFAULT;
                        break;
                }

                err = b_dev_validate_command(&bc->cmd);
                if (err)
                        break;

                err = b_dev_add_command(bd, bc);
                if (err)
                        break;

                done += sizeof(struct b_user_cmd);
                buf += sizeof(struct b_user_cmd);
                total--;
                bc = NULL;
        }

        if (bc)
                b_dev_free_command(bd, bc);

        *ppos = done;
        if (!done)
                done = err;

        return done;
}

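/*
 * read() returns completed commands, one struct b_user_cmd per entry,
 * with the error and nanosecond latency filled in. Blocks unless the
 * file was opened O_NONBLOCK.
 */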
static ssize_t b_dev_read(struct file *file, char __user *buf, size_t count,
                          loff_t *ppos)
{
        struct b_dev *bd = file->private_data;
        unsigned int total;
        ssize_t done = 0;
        int err = 0;

        if (count % sizeof(struct b_user_cmd))
                return -EINVAL;

        total = count / sizeof(struct b_user_cmd);
        while (total) {
                struct b_cmd *bc;

                bc = get_done_command(bd, !(file->f_flags & O_NONBLOCK));
                if (!bc) {
                        /* non-blocking and nothing has completed yet */
                        err = -EAGAIN;
                        break;
                }
                if (IS_ERR(bc)) {
                        err = PTR_ERR(bc);
                        break;
                }

                complete_and_free_bio(bc);

                if (copy_to_user(buf, &bc->cmd, sizeof(bc->cmd)))
                        err = -EFAULT;

                b_dev_free_command(bd, bc);

                if (err)
                        break;

                done += sizeof(struct b_user_cmd);
                buf += sizeof(struct b_user_cmd);
                total--;
        }

        *ppos = done;
        if (!done)
                done = err;

        return done;
}

static const struct file_operations b_dev_fops = {
        .open           = b_dev_open,
        .release        = b_dev_release,
        .read           = b_dev_read,
        .write          = b_dev_write,
        .poll           = b_dev_poll,
        .owner          = THIS_MODULE,
};

static int b_del_dev(struct b_ioctl_cmd *bic)
{
        struct b_dev *bd;

        bd = b_dev_lookup(bic->minor);
        if (bd) {
                spin_lock(&b_dev_lock);
                b_dev_remove_lookup(bd);
                spin_unlock(&b_dev_lock);

                /*
                 * Our lookup grabbed a reference, drop two
                 */
                b_dev_put(bd);
                b_dev_put(bd);
                return 0;
        }

        return -ENODEV;
}

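/*
 * Bind a new target: take a reference on the block device fd passed in
 * from user space, allocate the per-device state and per-cpu done lists,
 * reserve a minor in the idr and create the "binject%d" char device node
 * for it.
 */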
static int b_add_dev(struct b_ioctl_cmd *bic)
{
        struct inode *inode;
        struct file *file;
        struct b_dev *bd;
        int ret, cpu;

        file = fget(bic->fd);
        if (!file)
                return -EBADF;

        __module_get(THIS_MODULE);

        inode = file->f_mapping->host;
        if (!S_ISBLK(inode->i_mode)) {
                ret = -EINVAL;
                goto out_put;
        }

        ret = idr_pre_get(&b_minor_idr, GFP_KERNEL);
        if (!ret) {
                ret = -ENOMEM;
                goto out_put;
        }

        bd = kzalloc(sizeof(*bd), GFP_KERNEL);
        if (!bd) {
                ret = -ENOMEM;
                goto out_put;
        }

        bd->cpu_queue = alloc_percpu(struct b_dev_cpu);
        if (!bd->cpu_queue) {
                kfree(bd);
                ret = -ENOMEM;
                goto out_put;
        }

        for_each_possible_cpu(cpu) {
                struct b_dev_cpu *bdc;

                bdc = per_cpu_ptr(bd->cpu_queue, cpu);
                INIT_LIST_HEAD(&bdc->done_list);
                spin_lock_init(&bdc->lock);
        }

        /*
         * Fully initialize the device before it is published in the idr,
         * so a racing lookup never sees half-initialized state.
         */
        atomic_set(&bd->ref, 1);
        spin_lock_init(&bd->done_lock);
        INIT_LIST_HEAD(&bd->device_list);
        INIT_LIST_HEAD(&bd->reaped_done);
        init_waitqueue_head(&bd->wq_done);
        bd->file = file;
        bd->bdev = inode->i_bdev;

        spin_lock(&b_dev_lock);

        ret = idr_get_new(&b_minor_idr, bd, &bd->minor);
        if (ret < 0)
                goto out_unlock;

        if (bd->minor >= B_MAX_DEVS) {
                ret = -ENOSPC;
                goto out_idr;
        }

        spin_unlock(&b_dev_lock);

        bd->dev = binject_device_create(b_class, NULL,
                        MKDEV(b_major, bd->minor), bd, "binject%d", bd->minor);

        spin_lock(&b_dev_lock);

        if (IS_ERR(bd->dev)) {
                ret = PTR_ERR(bd->dev);
                goto out_idr;
        }

        list_add_tail(&bd->device_list, &b_dev_list);
        bic->minor = bd->minor;
        spin_unlock(&b_dev_lock);
        return 0;
out_idr:
        idr_remove(&b_minor_idr, bd->minor);
out_unlock:
        spin_unlock(&b_dev_lock);
        free_percpu(bd->cpu_queue);
        kfree(bd);
out_put:
        fput(file);
        module_put(THIS_MODULE);
        return ret;
}

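/*
 * Control ioctls, issued against the misc device (typically
 * /dev/binject-ctl): B_IOCTL_ADD takes an open block device fd in
 * bic->fd and returns the assigned minor in bic->minor; B_IOCTL_DEL
 * tears that binding down again.
 *
 * Rough user space usage (sketch only, error handling omitted; the
 * device paths depend on how the nodes are created on the system):
 *
 *	struct b_ioctl_cmd bic = { .fd = open("/dev/sdX", O_RDWR) };
 *	int ctl = open("/dev/binject-ctl", O_RDWR);
 *
 *	ioctl(ctl, B_IOCTL_ADD, &bic);
 *	// submit commands via /dev/binject<minor> using bic.minor ...
 *	ioctl(ctl, B_IOCTL_DEL, &bic);
 */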
static long b_misc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        void __user *uarg = (void __user *) arg;
        struct b_ioctl_cmd bic;
        int ret = -ENOTTY;

        if (copy_from_user(&bic, uarg, sizeof(bic)))
                return -EFAULT;

        switch (cmd) {
        case B_IOCTL_ADD:
                ret = b_add_dev(&bic);
                if (!ret && copy_to_user(uarg, &bic, sizeof(bic))) {
                        b_del_dev(&bic);
                        ret = -EFAULT;
                }
                break;
        case B_IOCTL_DEL:
                ret = b_del_dev(&bic);
                break;
        default:
                break;
        }

        return ret;
}

static const struct file_operations b_misc_fops = {
        .unlocked_ioctl = b_misc_ioctl,
        .owner          = THIS_MODULE,
};

static struct miscdevice b_misc_dev = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = "binject-ctl",
        .fops           = &b_misc_fops,
};

static void __exit b_exit(void)
{
        synchronize_rcu();
        kmem_cache_destroy(b_slab);
        class_destroy(b_class);
        unregister_chrdev(b_major, "binject");
        misc_deregister(&b_misc_dev);
}

static void __b_cmd_init_once(struct b_cmd *bc)
{
        INIT_LIST_HEAD(&bc->list);
}

#ifdef KCOMPAT_OLD_SLAB
static void b_cmd_init_once(void *data, kmem_cache_t *slab, unsigned long flags)
{
        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
            SLAB_CTOR_CONSTRUCTOR)
                __b_cmd_init_once(data);
}
#else
static void b_cmd_init_once(void *data)
{
        __b_cmd_init_once(data);
}
#endif

static int __init b_init(void)
{
        int ret;

        b_slab = binject_create_slab("binject", sizeof(struct b_cmd),
                                        SLAB_HWCACHE_ALIGN, b_cmd_init_once);
        if (!b_slab) {
                printk(KERN_ERR "binject: failed to create cmd slab\n");
                return -ENOMEM;
        }

        ret = misc_register(&b_misc_dev);
        if (ret < 0)
                goto fail_misc;

        b_major = register_chrdev(0, "binject", &b_dev_fops);
        if (b_major < 0) {
                ret = b_major;
                goto fail_chr;
        }

        b_class = class_create(THIS_MODULE, "binject");
        if (IS_ERR(b_class)) {
                ret = PTR_ERR(b_class);
                goto fail_class;
        }

        return 0;
fail_class:
        unregister_chrdev(b_major, "binject");
fail_chr:
        misc_deregister(&b_misc_dev);
fail_misc:
        kmem_cache_destroy(b_slab);
        return ret;
}

module_init(b_init);
module_exit(b_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");