/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"
#define CXL_NUM_MINORS 256 /* Total to reserve */
#define CXL_DEV_MINORS 13  /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */

#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS)
#define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice))
#define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1)
#define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2)
#define CXL_AFU_MKDEV_D(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_D(afu))
#define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu))
#define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu))

#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS)
#define CXL_DEVT_AFU(dev) ((MINOR(dev) % CXL_DEV_MINORS - 1) / 3)

#define CXL_DEVT_IS_CARD(dev) (MINOR(dev) % CXL_DEV_MINORS == 0)
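/*
 * Worked example of the layout above: adapter 0, AFU slice 1, master node
 * gets minor 0 * 13 + (1 + 3 * 1) + 1 = 5, which decodes back as
 * CXL_DEVT_ADAPTER = 5 / 13 = 0 and CXL_DEVT_AFU = (5 % 13 - 1) / 3 = 1.
 */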
static dev_t cxl_dev;

static struct class *cxl_class;
static int __afu_open(struct inode *inode, struct file *file, bool master)
{
	struct cxl *adapter;
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
	int slice = CXL_DEVT_AFU(inode->i_rdev);
	int rc = -ENODEV;

	pr_devel("afu_open afu%i.%i\n", adapter_num, slice);

	if (!(adapter = get_cxl_adapter(adapter_num)))
		return -ENODEV;

	if (slice > adapter->slices)
		goto err_put_adapter;

	/* Take a reference on the AFU before dropping the list lock so it
	 * can't disappear under us. */
	spin_lock(&adapter->afu_list_lock);
	if (!(afu = adapter->afu[slice])) {
		spin_unlock(&adapter->afu_list_lock);
		goto err_put_adapter;
	}
	get_device(&afu->dev);
	spin_unlock(&adapter->afu_list_lock);

	if (!afu->current_mode)
		goto err_put_afu;

	if (!(ctx = cxl_context_alloc())) {
		rc = -ENOMEM;
		goto err_put_afu;
	}

	if ((rc = cxl_context_init(ctx, afu, master, inode->i_mapping)))
		goto err_put_afu;

	pr_devel("afu_open pe: %i\n", ctx->pe);
	file->private_data = ctx;
	cxl_ctx_get();

	/* Our ref on the AFU will now hold the adapter */
	put_device(&adapter->dev);

	return 0;

err_put_afu:
	put_device(&afu->dev);
err_put_adapter:
	put_device(&adapter->dev);
	return rc;
}
static int afu_open(struct inode *inode, struct file *file)
{
	return __afu_open(inode, file, false);
}

static int afu_master_open(struct inode *inode, struct file *file)
{
	return __afu_open(inode, file, true);
}
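/*
 * Note that the master and regular open paths differ only in the 'master'
 * flag passed down to cxl_context_init() via __afu_open().
 */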
static int afu_release(struct inode *inode, struct file *file)
{
	struct cxl_context *ctx = file->private_data;

	pr_devel("%s: closing cxl file descriptor. pe: %i\n",
		 __func__, ctx->pe);
	cxl_context_detach(ctx);

	mutex_lock(&ctx->mapping_lock);
	ctx->mapping = NULL;
	mutex_unlock(&ctx->mapping_lock);

	put_device(&ctx->afu->dev);

	/*
	 * At this point all bottom halves have finished and we should be
	 * getting no more IRQs from the hardware for this context. Once it's
	 * removed from the IDR (and RCU synchronised) it's safe to free the
	 * context.
	 */
	cxl_context_free(ctx);
	cxl_ctx_put();

	return 0;
}
static long afu_ioctl_start_work(struct cxl_context *ctx,
				 struct cxl_ioctl_start_work __user *uwork)
{
	struct cxl_ioctl_start_work work;
	u64 amr = 0;
	int rc;

	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != OPENED) {
		rc = -EIO;
		goto out;
	}

	if (copy_from_user(&work, uwork,
			   sizeof(struct cxl_ioctl_start_work))) {
		rc = -EFAULT;
		goto out;
	}

	/*
	 * It's invalid if any of the reserved fields are set or any of the
	 * unused flags are set.
	 */
	if (work.reserved1 || work.reserved2 || work.reserved3 ||
	    work.reserved4 || work.reserved5 || work.reserved6 ||
	    (work.flags & ~CXL_START_WORK_ALL)) {
		rc = -EINVAL;
		goto out;
	}

	if (!(work.flags & CXL_START_WORK_NUM_IRQS))
		work.num_interrupts = ctx->afu->pp_irqs;
	else if ((work.num_interrupts < ctx->afu->pp_irqs) ||
		 (work.num_interrupts > ctx->afu->irqs_max)) {
		rc = -EINVAL;
		goto out;
	}
	if ((rc = afu_register_irqs(ctx, work.num_interrupts)))
		goto out;

	if (work.flags & CXL_START_WORK_AMR)
		amr = work.amr & mfspr(SPRN_UAMOR);

	/*
	 * We grab the PID here and not in the file open to allow for the case
	 * where a process (master, some daemon, etc) has opened the chardev on
	 * behalf of another process, so the AFU's mm gets bound to the process
	 * that performs this ioctl and not the process that opened the file.
	 */
	ctx->pid = get_pid(get_task_pid(current, PIDTYPE_PID));

	if ((rc = cxl_attach_process(ctx, false, work.work_element_descriptor,
				     amr)))
		goto out;

	ctx->status = STARTED;
	rc = 0;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
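/*
 * Illustrative userspace sequence for attaching a process to an AFU (a
 * sketch only; CXL_IOCTL_START_WORK and struct cxl_ioctl_start_work come
 * from the uapi header, and the device path depends on the AFU mode):
 *
 *	int fd = open("/dev/cxl/afu0.0s", O_RDWR);
 *	struct cxl_ioctl_start_work work = { 0 };
 *	ioctl(fd, CXL_IOCTL_START_WORK, &work);
 *	// the context is now STARTED: mmap() and read() become usable
 */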
static long afu_ioctl_process_element(struct cxl_context *ctx,
				      __u32 __user *upe)
{
	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	if (copy_to_user(upe, &ctx->pe, sizeof(__u32)))
		return -EFAULT;

	return 0;
}
static long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct cxl_context *ctx = file->private_data;

	if (ctx->status == CLOSED)
		return -EIO;

	pr_devel("afu_ioctl\n");
	switch (cmd) {
	case CXL_IOCTL_START_WORK:
		return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg);
	case CXL_IOCTL_GET_PROCESS_ELEMENT:
		return afu_ioctl_process_element(ctx, (__u32 __user *)arg);
	}
	return -EINVAL;
}
static long afu_compat_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	return afu_ioctl(file, cmd, arg);
}
static int afu_mmap(struct file *file, struct vm_area_struct *vm)
{
	struct cxl_context *ctx = file->private_data;

	/* AFU must be started before we can MMIO */
	if (ctx->status != STARTED)
		return -EIO;

	return cxl_context_iomap(ctx, vm);
}
static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
	struct cxl_context *ctx = file->private_data;
	int mask = 0;
	unsigned long flags;

	poll_wait(file, &ctx->wq, poll);

	pr_devel("afu_poll wait done pe: %i\n", ctx->pe);

	spin_lock_irqsave(&ctx->lock, flags);
	if (ctx->pending_irq || ctx->pending_fault ||
	    ctx->pending_afu_err)
		mask |= POLLIN | POLLRDNORM;
	else if (ctx->status == CLOSED)
		/* Only error on closed when there are no further events
		 * pending */
		mask |= POLLERR;
	spin_unlock_irqrestore(&ctx->lock, flags);

	pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask);

	return mask;
}
static inline int ctx_event_pending(struct cxl_context *ctx)
{
	return (ctx->pending_irq || ctx->pending_fault ||
		ctx->pending_afu_err || (ctx->status == CLOSED));
}
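/*
 * read() delivers one event per call: a struct cxl_event_header followed by
 * a type-specific body (AFU interrupt, data storage fault or AFU error).
 * The user buffer must be at least CXL_READ_MIN_SIZE bytes.
 */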
static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
			loff_t *off)
{
	struct cxl_context *ctx = file->private_data;
	struct cxl_event event;
	unsigned long flags;
	int rc;
	DEFINE_WAIT(wait);

	if (count < CXL_READ_MIN_SIZE)
		return -EINVAL;

	spin_lock_irqsave(&ctx->lock, flags);

	for (;;) {
		prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE);
		if (ctx_event_pending(ctx))
			break;

		if (file->f_flags & O_NONBLOCK) {
			rc = -EAGAIN;
			goto out;
		}

		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			goto out;
		}

		spin_unlock_irqrestore(&ctx->lock, flags);
		pr_devel("afu_read going to sleep...\n");
		schedule();
		pr_devel("afu_read woken up\n");
		spin_lock_irqsave(&ctx->lock, flags);
	}

	finish_wait(&ctx->wq, &wait);

	memset(&event, 0, sizeof(event));
	event.header.process_element = ctx->pe;
	event.header.size = sizeof(struct cxl_event_header);
	if (ctx->pending_irq) {
		pr_devel("afu_read delivering AFU interrupt\n");
		event.header.size += sizeof(struct cxl_event_afu_interrupt);
		event.header.type = CXL_EVENT_AFU_INTERRUPT;
		event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1;
		clear_bit(event.irq.irq - 1, ctx->irq_bitmap);
		if (bitmap_empty(ctx->irq_bitmap, ctx->irq_count))
			ctx->pending_irq = false;
	} else if (ctx->pending_fault) {
		pr_devel("afu_read delivering data storage fault\n");
		event.header.size += sizeof(struct cxl_event_data_storage);
		event.header.type = CXL_EVENT_DATA_STORAGE;
		event.fault.addr = ctx->fault_addr;
		event.fault.dsisr = ctx->fault_dsisr;
		ctx->pending_fault = false;
	} else if (ctx->pending_afu_err) {
		pr_devel("afu_read delivering afu error\n");
		event.header.size += sizeof(struct cxl_event_afu_error);
		event.header.type = CXL_EVENT_AFU_ERROR;
		event.afu_error.error = ctx->afu_err;
		ctx->pending_afu_err = false;
	} else if (ctx->status == CLOSED) {
		pr_devel("afu_read fatal error\n");
		spin_unlock_irqrestore(&ctx->lock, flags);
		return -EIO;
	} else
		WARN(1, "afu_read must be buggy\n");

	spin_unlock_irqrestore(&ctx->lock, flags);

	if (copy_to_user(buf, &event, event.header.size))
		return -EFAULT;
	return event.header.size;

out:
	finish_wait(&ctx->wq, &wait);
	spin_unlock_irqrestore(&ctx->lock, flags);
	return rc;
}
static const struct file_operations afu_fops = {
	.owner		= THIS_MODULE,
	.open		= afu_open,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.unlocked_ioctl	= afu_ioctl,
	.compat_ioctl	= afu_compat_ioctl,
	.mmap		= afu_mmap,
};
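/* Identical to afu_fops except for .open, which flags the context as master. */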
static const struct file_operations afu_master_fops = {
	.owner		= THIS_MODULE,
	.open		= afu_master_open,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.unlocked_ioctl	= afu_ioctl,
	.compat_ioctl	= afu_compat_ioctl,
	.mmap		= afu_mmap,
};
static char *cxl_devnode(struct device *dev, umode_t *mode)
{
	if (CXL_DEVT_IS_CARD(dev->devt)) {
		/*
		 * These minor numbers will eventually be used to program the
		 * PSL and AFUs once we have dynamic reprogramming support
		 */
		return NULL;
	}
	return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}
static int cxl_add_chardev(struct cxl_afu *afu, dev_t devt, struct cdev *cdev,
			   struct device **chardev, char *postfix, char *desc,
			   const struct file_operations *fops)
{
	struct device *dev;
	int rc;

	cdev_init(cdev, fops);
	if ((rc = cdev_add(cdev, devt, 1))) {
		dev_err(&afu->dev, "Unable to add %s chardev: %i\n", desc, rc);
		return rc;
	}

	dev = device_create(cxl_class, &afu->dev, devt, afu,
			"afu%i.%i%s", afu->adapter->adapter_num, afu->slice, postfix);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		dev_err(&afu->dev, "Unable to create %s chardev in sysfs: %i\n", desc, rc);
		goto err;
	}

	*chardev = dev;

	return 0;
err:
	cdev_del(cdev);
	return rc;
}
int cxl_chardev_d_afu_add(struct cxl_afu *afu)
{
	return cxl_add_chardev(afu, CXL_AFU_MKDEV_D(afu), &afu->afu_cdev_d,
			       &afu->chardev_d, "d", "dedicated",
			       &afu_master_fops); /* Uses master fops */
}

int cxl_chardev_m_afu_add(struct cxl_afu *afu)
{
	return cxl_add_chardev(afu, CXL_AFU_MKDEV_M(afu), &afu->afu_cdev_m,
			       &afu->chardev_m, "m", "master",
			       &afu_master_fops);
}

int cxl_chardev_s_afu_add(struct cxl_afu *afu)
{
	return cxl_add_chardev(afu, CXL_AFU_MKDEV_S(afu), &afu->afu_cdev_s,
			       &afu->chardev_s, "s", "shared",
			       &afu_fops);
}
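/*
 * Together with cxl_devnode() above, these register nodes named
 * /dev/cxl/afu<adapter>.<slice><postfix>, e.g. /dev/cxl/afu0.0m for the
 * master chardev of AFU 0 on card 0.
 */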
void cxl_chardev_afu_remove(struct cxl_afu *afu)
{
	if (afu->chardev_d) {
		cdev_del(&afu->afu_cdev_d);
		device_unregister(afu->chardev_d);
		afu->chardev_d = NULL;
	}
	if (afu->chardev_m) {
		cdev_del(&afu->afu_cdev_m);
		device_unregister(afu->chardev_m);
		afu->chardev_m = NULL;
	}
	if (afu->chardev_s) {
		cdev_del(&afu->afu_cdev_s);
		device_unregister(afu->chardev_s);
		afu->chardev_s = NULL;
	}
}
int cxl_register_afu(struct cxl_afu *afu)
{
	afu->dev.class = cxl_class;

	return device_register(&afu->dev);
}

int cxl_register_adapter(struct cxl *adapter)
{
	adapter->dev.class = cxl_class;

	/*
	 * Future: When we support dynamically reprogramming the PSL & AFU we
	 * will expose the interface to do that via a chardev:
	 * adapter->dev.devt = CXL_CARD_MKDEV(adapter);
	 */

	return device_register(&adapter->dev);
}
int __init cxl_file_init(void)
{
	int rc;

	/*
	 * If these change we really need to update API. Either change some
	 * flags or update API version number CXL_API_VERSION.
	 */
	BUILD_BUG_ON(CXL_API_VERSION != 1);
	BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64);
	BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8);
	BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8);
	BUILD_BUG_ON(sizeof(struct cxl_event_data_storage) != 32);
	BUILD_BUG_ON(sizeof(struct cxl_event_afu_error) != 16);

	if ((rc = alloc_chrdev_region(&cxl_dev, 0, CXL_NUM_MINORS, "cxl"))) {
		pr_err("Unable to allocate CXL major number: %i\n", rc);
		return rc;
	}

	pr_devel("CXL device allocated, MAJOR %i\n", MAJOR(cxl_dev));

	cxl_class = class_create(THIS_MODULE, "cxl");
	if (IS_ERR(cxl_class)) {
		pr_err("Unable to create CXL class\n");
		rc = PTR_ERR(cxl_class);
		goto err;
	}
	cxl_class->devnode = cxl_devnode;

	return 0;

err:
	unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
	return rc;
}
void cxl_file_exit(void)
{
	unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
	class_destroy(cxl_class);
}