/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/poll.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"
#include "trace.h"

#define CXL_NUM_MINORS 256 /* Total to reserve */

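/*
 * Minor number layout: each adapter owns a block of CXL_DEV_MINORS minors;
 * minor 0 of the block is the card itself, and each AFU slice then gets
 * three consecutive minors: dedicated (d), master (m) and shared (s).
 */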
#define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice))
#define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1)
#define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2)
#define CXL_AFU_MKDEV_D(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_D(afu))
#define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu))
#define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu))

#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS)
#define CXL_DEVT_AFU(dev) ((MINOR(dev) % CXL_DEV_MINORS - 1) / 3)

#define CXL_DEVT_IS_CARD(dev) (MINOR(dev) % CXL_DEV_MINORS == 0)

static dev_t cxl_dev;

static struct class *cxl_class;

static int __afu_open(struct inode *inode, struct file *file, bool master)
{
	struct cxl *adapter;
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
	int slice = CXL_DEVT_AFU(inode->i_rdev);
	int rc = -ENODEV;

	pr_devel("afu_open afu%i.%i\n", slice, adapter_num);

	if (!(adapter = get_cxl_adapter(adapter_num)))
		return -ENODEV;

	if (slice > adapter->slices)
		goto err_put_adapter;

	spin_lock(&adapter->afu_list_lock);
	if (!(afu = adapter->afu[slice])) {
		spin_unlock(&adapter->afu_list_lock);
		goto err_put_adapter;
	}

	/*
	 * Take a ref on the AFU so that it doesn't go away for the
	 * rest of the function. This ref is released before we return.
	 */
	cxl_afu_get(afu);
	spin_unlock(&adapter->afu_list_lock);

	if (!afu->current_mode)
		goto err_put_afu;

	if (!cxl_ops->link_ok(adapter, afu)) {
		rc = -EIO;
		goto err_put_afu;
	}

	if (!(ctx = cxl_context_alloc())) {
		rc = -ENOMEM;
		goto err_put_afu;
	}

	rc = cxl_context_init(ctx, afu, master);
	if (rc)
		goto err_put_afu;

	cxl_context_set_mapping(ctx, inode->i_mapping);

	pr_devel("afu_open pe: %i\n", ctx->pe);
	file->private_data = ctx;

	/* indicate success */
	rc = 0;

err_put_afu:
	/* release the ref taken earlier */
	cxl_afu_put(afu);
err_put_adapter:
	put_device(&adapter->dev);
	return rc;
}

int afu_open(struct inode *inode, struct file *file)
{
	return __afu_open(inode, file, false);
}

static int afu_master_open(struct inode *inode, struct file *file)
{
	return __afu_open(inode, file, true);
}

int afu_release(struct inode *inode, struct file *file)
{
	struct cxl_context *ctx = file->private_data;

	pr_devel("%s: closing cxl file descriptor. pe: %i\n",
		 __func__, ctx->pe);
	cxl_context_detach(ctx);

	/*
	 * Delete the context's mapping pointer, unless it's created by the
	 * kernel API, in which case leave it so it can be freed by reclaim_ctx()
	 */
	if (!ctx->kernelapi) {
		mutex_lock(&ctx->mapping_lock);
		ctx->mapping = NULL;
		mutex_unlock(&ctx->mapping_lock);
	}

	/*
	 * At this point all bottom halves have finished and we should be
	 * getting no more IRQs from the hardware for this context. Once it's
	 * removed from the IDR (and RCU synchronised) it's safe to free the
	 * context.
	 */
	cxl_context_free(ctx);

	return 0;
}

static long afu_ioctl_start_work(struct cxl_context *ctx,
				 struct cxl_ioctl_start_work __user *uwork)
{
	struct cxl_ioctl_start_work work;
	u64 amr = 0;
	int rc;

	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	/* Do this outside the status_mutex to avoid a circular dependency with
	 * the locking in cxl_mmap_fault() */
	if (copy_from_user(&work, uwork,
			   sizeof(struct cxl_ioctl_start_work)))
		return -EFAULT;

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != OPENED) {
		rc = -EIO;
		goto out;
	}

	/*
	 * if any of the reserved fields are set or any of the unused
	 * flags are set it's invalid
	 */
	if (work.reserved1 || work.reserved2 || work.reserved3 ||
	    work.reserved4 || work.reserved5 || work.reserved6 ||
	    (work.flags & ~CXL_START_WORK_ALL)) {
		rc = -EINVAL;
		goto out;
	}

	if (!(work.flags & CXL_START_WORK_NUM_IRQS))
		work.num_interrupts = ctx->afu->pp_irqs;
	else if ((work.num_interrupts < ctx->afu->pp_irqs) ||
		 (work.num_interrupts > ctx->afu->irqs_max)) {
		rc = -EINVAL;
		goto out;
	}

	if ((rc = afu_register_irqs(ctx, work.num_interrupts)))
		goto out;

	if (work.flags & CXL_START_WORK_AMR)
		amr = work.amr & mfspr(SPRN_UAMOR);

	ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);

	/*
	 * Increment the mapped context count for adapter. This also checks
	 * if adapter_context_lock is taken.
	 */
	rc = cxl_adapter_context_get(ctx->afu->adapter);
	if (rc) {
		afu_release_irqs(ctx, ctx);
		goto out;
	}

	/*
	 * We grab the PID here and not in the file open to allow for the case
	 * where a process (master, some daemon, etc) has opened the chardev on
	 * behalf of another process, so the AFU's mm gets bound to the process
	 * that performs this ioctl and not the process that opened the file.
	 * Also we grab the PID of the group leader so that if the task that
	 * has performed the attach operation exits the mm context of the
	 * process is still accessible.
	 */
	ctx->pid = get_task_pid(current, PIDTYPE_PID);
	ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);

	trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);

	if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
							amr))) {
		afu_release_irqs(ctx, ctx);
		cxl_adapter_context_put(ctx->afu->adapter);
		put_pid(ctx->glpid);
		put_pid(ctx->pid);
		ctx->glpid = ctx->pid = NULL;
		goto out;
	}

	ctx->status = STARTED;
	rc = 0;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}

static long afu_ioctl_process_element(struct cxl_context *ctx,
				      int __user *upe)
{
	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	if (copy_to_user(upe, &ctx->external_pe, sizeof(__u32)))
		return -EFAULT;

	return 0;
}

static long afu_ioctl_get_afu_id(struct cxl_context *ctx,
				 struct cxl_afu_id __user *upafuid)
{
	struct cxl_afu_id afuid = { 0 };

	afuid.card_id = ctx->afu->adapter->adapter_num;
	afuid.afu_offset = ctx->afu->slice;
	afuid.afu_mode = ctx->afu->current_mode;

	/* set the flag bit in case the afu is a slave */
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED && !ctx->master)
		afuid.flags |= CXL_AFUID_FLAG_SLAVE;

	if (copy_to_user(upafuid, &afuid, sizeof(afuid)))
		return -EFAULT;

	return 0;
}

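/*
 * Illustrative userspace sequence against this interface (device path and
 * WED value are hypothetical, shown only as a sketch):
 *
 *	int fd = open("/dev/cxl/afu0.0s", O_RDWR);
 *	struct cxl_ioctl_start_work work = {
 *		.work_element_descriptor = wed,
 *	};
 *	ioctl(fd, CXL_IOCTL_START_WORK, &work);
 *	ioctl(fd, CXL_IOCTL_GET_PROCESS_ELEMENT, &pe);
 *
 * followed by mmap() of the per-process MMIO area and read() to receive
 * AFU events.
 */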
long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct cxl_context *ctx = file->private_data;

	if (ctx->status == CLOSED)
		return -EIO;

	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		return -EIO;

	pr_devel("afu_ioctl\n");
	switch (cmd) {
	case CXL_IOCTL_START_WORK:
		return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg);
	case CXL_IOCTL_GET_PROCESS_ELEMENT:
		return afu_ioctl_process_element(ctx, (__u32 __user *)arg);
	case CXL_IOCTL_GET_AFU_ID:
		return afu_ioctl_get_afu_id(ctx, (struct cxl_afu_id __user *)
					    arg);
	}
	return -EINVAL;
}

static long afu_compat_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	return afu_ioctl(file, cmd, arg);
}

int afu_mmap(struct file *file, struct vm_area_struct *vm)
{
	struct cxl_context *ctx = file->private_data;

	/* AFU must be started before we can MMIO */
	if (ctx->status != STARTED)
		return -EIO;

	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		return -EIO;

	return cxl_context_iomap(ctx, vm);
}

static inline bool ctx_event_pending(struct cxl_context *ctx)
{
	if (ctx->pending_irq || ctx->pending_fault || ctx->pending_afu_err)
		return true;

	if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events))
		return true;

	return false;
}

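/*
 * Poll contract: POLLIN|POLLRDNORM once any event is queued for the
 * context; POLLERR only after the context is CLOSED and fully drained.
 */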
unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
	struct cxl_context *ctx = file->private_data;
	int mask = 0;
	unsigned long flags;

	poll_wait(file, &ctx->wq, poll);

	pr_devel("afu_poll wait done pe: %i\n", ctx->pe);

	spin_lock_irqsave(&ctx->lock, flags);
	if (ctx_event_pending(ctx))
		mask |= POLLIN | POLLRDNORM;
	else if (ctx->status == CLOSED)
		/* Only error on closed when there are no further events pending
		 */
		mask |= POLLERR;
	spin_unlock_irqrestore(&ctx->lock, flags);

	pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask);

	return mask;
}

static ssize_t afu_driver_event_copy(struct cxl_context *ctx,
				     char __user *buf,
				     struct cxl_event *event,
				     struct cxl_event_afu_driver_reserved *pl)
{
	/* Check event */
	if (!pl) {
		ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
		return -EFAULT;
	}

	/* Check event size */
	event->header.size += pl->data_size;
	if (event->header.size > CXL_READ_MIN_SIZE) {
		ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
		return -EFAULT;
	}

	/* Copy event header */
	if (copy_to_user(buf, event, sizeof(struct cxl_event_header))) {
		ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
		return -EFAULT;
	}

	/* Copy event data */
	buf += sizeof(struct cxl_event_header);
	if (copy_to_user(buf, &pl->data, pl->data_size)) {
		ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
		return -EFAULT;
	}

	ctx->afu_driver_ops->event_delivered(ctx, pl, 0); /* Success */
	return event->header.size;
}

ssize_t afu_read(struct file *file, char __user *buf, size_t count,
		 loff_t *off)
{
	struct cxl_context *ctx = file->private_data;
	struct cxl_event_afu_driver_reserved *pl = NULL;
	struct cxl_event event;
	unsigned long flags;
	int rc;
	DEFINE_WAIT(wait);

	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		return -EIO;

	if (count < CXL_READ_MIN_SIZE)
		return -EINVAL;

	spin_lock_irqsave(&ctx->lock, flags);

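	/*
	 * prepare_to_wait() publishes the waiter before each condition
	 * re-check, so a wakeup from the interrupt path can't be lost;
	 * ctx->lock is dropped only around schedule().
	 */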
	for (;;) {
		prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE);
		if (ctx_event_pending(ctx) || (ctx->status == CLOSED))
			break;

		if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
			rc = -EIO;
			goto out;
		}

		if (file->f_flags & O_NONBLOCK) {
			rc = -EAGAIN;
			goto out;
		}

		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			goto out;
		}

		spin_unlock_irqrestore(&ctx->lock, flags);
		pr_devel("afu_read going to sleep...\n");
		schedule();
		pr_devel("afu_read woken up\n");
		spin_lock_irqsave(&ctx->lock, flags);
	}

	finish_wait(&ctx->wq, &wait);

	memset(&event, 0, sizeof(event));
	event.header.process_element = ctx->pe;
	event.header.size = sizeof(struct cxl_event_header);
	if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) {
		pr_devel("afu_read delivering AFU driver specific event\n");
		pl = ctx->afu_driver_ops->fetch_event(ctx);
		atomic_dec(&ctx->afu_driver_events);
		event.header.type = CXL_EVENT_AFU_DRIVER;
	} else if (ctx->pending_irq) {
		pr_devel("afu_read delivering AFU interrupt\n");
		event.header.size += sizeof(struct cxl_event_afu_interrupt);
		event.header.type = CXL_EVENT_AFU_INTERRUPT;
		event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1;
		clear_bit(event.irq.irq - 1, ctx->irq_bitmap);
		if (bitmap_empty(ctx->irq_bitmap, ctx->irq_count))
			ctx->pending_irq = false;
	} else if (ctx->pending_fault) {
		pr_devel("afu_read delivering data storage fault\n");
		event.header.size += sizeof(struct cxl_event_data_storage);
		event.header.type = CXL_EVENT_DATA_STORAGE;
		event.fault.addr = ctx->fault_addr;
		event.fault.dsisr = ctx->fault_dsisr;
		ctx->pending_fault = false;
	} else if (ctx->pending_afu_err) {
		pr_devel("afu_read delivering afu error\n");
		event.header.size += sizeof(struct cxl_event_afu_error);
		event.header.type = CXL_EVENT_AFU_ERROR;
		event.afu_error.error = ctx->afu_err;
		ctx->pending_afu_err = false;
	} else if (ctx->status == CLOSED) {
		pr_devel("afu_read fatal error\n");
		spin_unlock_irqrestore(&ctx->lock, flags);
		return -EIO;
	} else
		WARN(1, "afu_read must be buggy\n");

	spin_unlock_irqrestore(&ctx->lock, flags);

	if (event.header.type == CXL_EVENT_AFU_DRIVER)
		return afu_driver_event_copy(ctx, buf, &event, pl);

	if (copy_to_user(buf, &event, event.header.size))
		return -EFAULT;
	return event.header.size;

out:
	finish_wait(&ctx->wq, &wait);
	spin_unlock_irqrestore(&ctx->lock, flags);
	return rc;
}

/*
 * Note: if this is updated, we need to update api.c to patch the new ones in
 * too.
 */
const struct file_operations afu_fops = {
	.owner		= THIS_MODULE,
	.open		= afu_open,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.unlocked_ioctl = afu_ioctl,
	.compat_ioctl	= afu_compat_ioctl,
	.mmap		= afu_mmap,
};

static const struct file_operations afu_master_fops = {
	.owner		= THIS_MODULE,
	.open		= afu_master_open,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.unlocked_ioctl = afu_ioctl,
	.compat_ioctl	= afu_compat_ioctl,
	.mmap		= afu_mmap,
};

static char *cxl_devnode(struct device *dev, umode_t *mode)
{
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    CXL_DEVT_IS_CARD(dev->devt)) {
		/*
		 * These minor numbers will eventually be used to program the
		 * PSL and AFUs once we have dynamic reprogramming support
		 */
		return NULL;
	}
	return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}

extern struct class *cxl_class;

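/*
 * Character devices are created under /dev/cxl/ as afu<card>.<slice>
 * plus the mode postfix, e.g. afu0.0d, afu0.0m and afu0.0s.
 */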
static int cxl_add_chardev(struct cxl_afu *afu, dev_t devt, struct cdev *cdev,
			   struct device **chardev, char *postfix, char *desc,
			   const struct file_operations *fops)
{
	struct device *dev;
	int rc;

	cdev_init(cdev, fops);
	if ((rc = cdev_add(cdev, devt, 1))) {
		dev_err(&afu->dev, "Unable to add %s chardev: %i\n", desc, rc);
		return rc;
	}

	dev = device_create(cxl_class, &afu->dev, devt, afu,
			"afu%i.%i%s", afu->adapter->adapter_num, afu->slice, postfix);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		dev_err(&afu->dev, "Unable to create %s chardev in sysfs: %i\n", desc, rc);
		goto err;
	}

	*chardev = dev;

	return 0;
err:
	cdev_del(cdev);
	return rc;
}

int cxl_chardev_d_afu_add(struct cxl_afu *afu)
{
	return cxl_add_chardev(afu, CXL_AFU_MKDEV_D(afu), &afu->afu_cdev_d,
			       &afu->chardev_d, "d", "dedicated",
			       &afu_master_fops); /* Uses master fops */
}

int cxl_chardev_m_afu_add(struct cxl_afu *afu)
{
	return cxl_add_chardev(afu, CXL_AFU_MKDEV_M(afu), &afu->afu_cdev_m,
			       &afu->chardev_m, "m", "master",
			       &afu_master_fops);
}

int cxl_chardev_s_afu_add(struct cxl_afu *afu)
{
	return cxl_add_chardev(afu, CXL_AFU_MKDEV_S(afu), &afu->afu_cdev_s,
			       &afu->chardev_s, "s", "shared",
			       &afu_fops);
}

void cxl_chardev_afu_remove(struct cxl_afu *afu)
{
	if (afu->chardev_d) {
		cdev_del(&afu->afu_cdev_d);
		device_unregister(afu->chardev_d);
		afu->chardev_d = NULL;
	}
	if (afu->chardev_m) {
		cdev_del(&afu->afu_cdev_m);
		device_unregister(afu->chardev_m);
		afu->chardev_m = NULL;
	}
	if (afu->chardev_s) {
		cdev_del(&afu->afu_cdev_s);
		device_unregister(afu->chardev_s);
		afu->chardev_s = NULL;
	}
}

int cxl_register_afu(struct cxl_afu *afu)
{
	afu->dev.class = cxl_class;

	return device_register(&afu->dev);
}

int cxl_register_adapter(struct cxl *adapter)
{
	adapter->dev.class = cxl_class;

	/*
	 * Future: When we support dynamically reprogramming the PSL & AFU we
	 * will expose the interface to do that via a chardev:
	 * adapter->dev.devt = CXL_CARD_MKDEV(adapter);
	 */

	return device_register(&adapter->dev);
}

dev_t cxl_get_dev(void)
{
	return cxl_dev;
}

int __init cxl_file_init(void)
{
	int rc;

	/*
	 * If these change we really need to update API. Either change some
	 * flags or update API version number CXL_API_VERSION.
	 */
	BUILD_BUG_ON(CXL_API_VERSION != 3);
	BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64);
	BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8);
	BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8);
	BUILD_BUG_ON(sizeof(struct cxl_event_data_storage) != 32);
	BUILD_BUG_ON(sizeof(struct cxl_event_afu_error) != 16);

	if ((rc = alloc_chrdev_region(&cxl_dev, 0, CXL_NUM_MINORS, "cxl"))) {
		pr_err("Unable to allocate CXL major number: %i\n", rc);
		return rc;
	}

	pr_devel("CXL device allocated, MAJOR %i\n", MAJOR(cxl_dev));

	cxl_class = class_create(THIS_MODULE, "cxl");
	if (IS_ERR(cxl_class)) {
		pr_err("Unable to create CXL class\n");
		rc = PTR_ERR(cxl_class);
		goto err;
	}
	cxl_class->devnode = cxl_devnode;

	return 0;

err:
	unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
	return rc;
}

void cxl_file_exit(void)
{
	unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
	class_destroy(cxl_class);
}