// SPDX-License-Identifier: GPL-2.0
/*
 * Microsemi Switchtec(tm) PCIe Management Driver
 * Copyright (c) 2017, Microsemi Corporation
 */

#include <linux/switchtec.h>
#include <linux/switchtec_ioctl.h>

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include <linux/nospec.h>

MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);

struct class *switchtec_class;
EXPORT_SYMBOL_GPL(switchtec_class);

enum mrpc_state {
	MRPC_IDLE = 0,
	MRPC_QUEUED,
	MRPC_RUNNING,
	MRPC_DONE,
};

struct switchtec_user {
	struct switchtec_dev *stdev;

	enum mrpc_state state;

	struct completion comp;
	struct kref kref;
	struct list_head list;

	u32 cmd;
	u32 status;
	u32 return_code;
	size_t data_len;
	size_t read_len;
	unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	int event_cnt;
};

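/*
 * One switchtec_user is allocated per open file handle; it holds a
 * reference on the switchtec device for as long as the handle (or a
 * still-queued MRPC command) is alive.
 */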
static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser;

	stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
	if (!stuser)
		return ERR_PTR(-ENOMEM);

	get_device(&stdev->dev);
	stuser->stdev = stdev;
	kref_init(&stuser->kref);
	INIT_LIST_HEAD(&stuser->list);
	init_completion(&stuser->comp);
	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return stuser;
}

static void stuser_free(struct kref *kref)
{
	struct switchtec_user *stuser;

	stuser = container_of(kref, struct switchtec_user, kref);

	dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);

	put_device(&stuser->stdev->dev);
	kfree(stuser);
}

static void stuser_put(struct switchtec_user *stuser)
{
	kref_put(&stuser->kref, stuser_free);
}

static void stuser_set_state(struct switchtec_user *stuser,
			     enum mrpc_state state)
{
	/* requires the mrpc_mutex to already be held when called */

	const char * const state_names[] = {
		[MRPC_IDLE] = "IDLE",
		[MRPC_QUEUED] = "QUEUED",
		[MRPC_RUNNING] = "RUNNING",
		[MRPC_DONE] = "DONE",
	};

	stuser->state = state;

	dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
		stuser, state_names[state]);
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev);

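/*
 * Start the next command on the MRPC queue if the hardware is idle:
 * copy the input payload into the MRPC input buffer, write the command
 * register, and arm a 500ms timeout in case the completion interrupt
 * never arrives.
 */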
static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (stdev->mrpc_busy)
		return;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS)
		mrpc_complete_cmd(stdev);

	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}

static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_dev *stdev = stuser->stdev;

	kref_get(&stuser->kref);
	stuser->read_len = sizeof(stuser->data);
	stuser_set_state(stuser, MRPC_QUEUED);
	init_completion(&stuser->comp);
	list_add_tail(&stuser->list, &stdev->mrpc_queue);

	mrpc_cmd_submit(stdev);

	return 0;
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */
	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;

	stuser_set_state(stuser, MRPC_DONE);
	stuser->return_code = 0;

	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
		goto out;

	stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stuser->return_code != 0)
		goto out;

	memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
		      stuser->read_len);

out:
	complete_all(&stuser->comp);
	list_del_init(&stuser->list);
	stuser_put(stuser);
	stdev->mrpc_busy = 0;

	mrpc_cmd_submit(stdev);
}

static void mrpc_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, mrpc_work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);
	cancel_delayed_work(&stdev->mrpc_timeout);
	mrpc_complete_cmd(stdev);
	mutex_unlock(&stdev->mrpc_mutex);
}

static void mrpc_timeout_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;
	u32 status;

	stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);

	status = ioread32(&stdev->mmio_mrpc->status);
	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
		schedule_delayed_work(&stdev->mrpc_timeout,
				      msecs_to_jiffies(500));
		goto out;
	}

	mrpc_complete_cmd(stdev);

out:
	mutex_unlock(&stdev->mrpc_mutex);
}

static ssize_t device_version_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->device_version);

	return sprintf(buf, "%x\n", ver);
}
static DEVICE_ATTR_RO(device_version);

static ssize_t fw_version_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->firmware_version);

	return sprintf(buf, "%08x\n", ver);
}
static DEVICE_ATTR_RO(fw_version);

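/*
 * Copy a fixed-width, space-padded string register into buf, trimming
 * the trailing padding and terminating the result with a newline for
 * sysfs.
 */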
static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
{
	int i;

	memcpy_fromio(buf, attr, len);
	buf[len] = '\n';
	buf[len + 1] = 0;

	for (i = len - 1; i > 0; i--) {
		if (buf[i] != ' ')
			break;
		buf[i] = '\n';
		buf[i + 1] = 0;
	}

	return strlen(buf);
}

#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	return io_string_show(buf, &stdev->mmio_sys_info->field, \
			    sizeof(stdev->mmio_sys_info->field)); \
} \
\
static DEVICE_ATTR_RO(field)

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
DEVICE_ATTR_SYS_INFO_STR(component_vendor);

static ssize_t component_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int id = ioread16(&stdev->mmio_sys_info->component_id);

	return sprintf(buf, "PM%04X\n", id);
}
static DEVICE_ATTR_RO(component_id);

static ssize_t component_revision_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int rev = ioread8(&stdev->mmio_sys_info->component_revision);

	return sprintf(buf, "%d\n", rev);
}
static DEVICE_ATTR_RO(component_revision);

static ssize_t partition_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition);
}
static DEVICE_ATTR_RO(partition);

static ssize_t partition_count_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition_count);
}
static DEVICE_ATTR_RO(partition_count);

static struct attribute *switchtec_device_attrs[] = {
	&dev_attr_device_version.attr,
	&dev_attr_fw_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_product_revision.attr,
	&dev_attr_component_vendor.attr,
	&dev_attr_component_id.attr,
	&dev_attr_component_revision.attr,
	&dev_attr_partition.attr,
	&dev_attr_partition_count.attr,
	NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);

static int switchtec_dev_open(struct inode *inode, struct file *filp)
{
	struct switchtec_dev *stdev;
	struct switchtec_user *stuser;

	stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);

	stuser = stuser_create(stdev);
	if (IS_ERR(stuser))
		return PTR_ERR(stuser);

	filp->private_data = stuser;
	nonseekable_open(inode, filp);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return 0;
}

static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
	struct switchtec_user *stuser = filp->private_data;

	stuser_put(stuser);

	return 0;
}

static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
{
	if (mutex_lock_interruptible(&stdev->mrpc_mutex))
		return -EINTR;

	if (!stdev->alive) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -ENODEV;
	}

	return 0;
}

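/*
 * Write handler for the MRPC interface: userspace writes a 32-bit
 * command ID followed by up to SWITCHTEC_MRPC_PAYLOAD_SIZE bytes of
 * input data, which queues a single command per open handle.
 */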
static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
				   size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	stuser->data_len = size - sizeof(stuser->cmd);

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_IDLE) {
		rc = -EBADE;
		goto out;
	}

	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->cmd);
	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	rc = mrpc_queue_cmd(stuser);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (rc)
		return rc;

	return size;
}

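/*
 * Read handler for the MRPC interface: once the queued command
 * completes, userspace reads back the 32-bit return code followed by
 * the output data. Reads block unless O_NONBLOCK is set.
 */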
static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
				  size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state == MRPC_IDLE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	stuser->read_len = size - sizeof(stuser->return_code);

	mutex_unlock(&stdev->mrpc_mutex);

	if (filp->f_flags & O_NONBLOCK) {
		if (!try_wait_for_completion(&stuser->comp))
			return -EAGAIN;
	} else {
		rc = wait_for_completion_interruptible(&stuser->comp);
		if (rc < 0)
			return rc;
	}

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_DONE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	rc = copy_to_user(data, &stuser->return_code,
			  sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->return_code);
	rc = copy_to_user(data, &stuser->data,
			  size - sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	stuser_set_state(stuser, MRPC_IDLE);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
		return size;
	else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
		return -ENXIO;
	else
		return -EBADMSG;
}

static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	__poll_t ret = 0;

	poll_wait(filp, &stuser->comp.wait, wait);
	poll_wait(filp, &stdev->event_wq, wait);

	if (lock_mutex_and_test_alive(stdev))
		return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP;

	mutex_unlock(&stdev->mrpc_mutex);

	if (try_wait_for_completion(&stuser->comp))
		ret |= EPOLLIN | EPOLLRDNORM;

	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
		ret |= EPOLLPRI | EPOLLRDBAND;

	return ret;
}

static int ioctl_flash_info(struct switchtec_dev *stdev,
			    struct switchtec_ioctl_flash_info __user *uinfo)
{
	struct switchtec_ioctl_flash_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;

	info.flash_length = ioread32(&fi->flash_length);
	info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
			     struct partition_info __iomem *pi)
{
	info->address = ioread32(&pi->address);
	info->length = ioread32(&pi->length);
}

static int ioctl_flash_part_info(struct switchtec_dev *stdev,
	struct switchtec_ioctl_flash_part_info __user *uinfo)
{
	struct switchtec_ioctl_flash_part_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
	struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
	u32 active_addr = -1;

	if (copy_from_user(&info, uinfo, sizeof(info)))
		return -EFAULT;

	switch (info.flash_partition) {
	case SWITCHTEC_IOCTL_PART_CFG0:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg0);
		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg1);
		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img0);
		if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img1);
		if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(&info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(&info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(&info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(&info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(&info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(&info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(&info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(&info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(&info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	if (info.address == active_addr)
		info.active |= SWITCHTEC_IOCTL_PART_ACTIVE;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static int ioctl_event_summary(struct switchtec_dev *stdev,
	struct switchtec_user *stuser,
	struct switchtec_ioctl_event_summary __user *usum)
{
	struct switchtec_ioctl_event_summary s = {0};
	int i;
	u32 reg;

	s.global = ioread32(&stdev->mmio_sw_event->global_summary);
	s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
	s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);

	for (i = 0; i < stdev->partition_count; i++) {
		reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
		s.part[i] = reg;
	}

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != PCI_VENDOR_ID_MICROSEMI)
			break;

		reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
		s.pff[i] = reg;
	}

	if (copy_to_user(usum, &s, sizeof(s)))
		return -EFAULT;

	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	return 0;
}

static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
				  size_t offset, int index)
{
	return (void __iomem *)stdev->mmio_sw_event + offset;
}

static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
				size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
}

static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
			       size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
}

#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}

static const struct event_reg {
	size_t offset;
	u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
				size_t offset, int index);
} event_regs[] = {
	EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
	       twi_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
	       cli_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS, gfms_event_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
};

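/*
 * Translate an ioctl event ID and index into the MMIO address of the
 * corresponding event header register, validating the index against
 * the partition or PFF count where applicable.
 */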
static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
				   int event_id, int index)
{
	size_t off;

	if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return ERR_PTR(-EINVAL);

	off = event_regs[event_id].offset;

	if (event_regs[event_id].map_reg == part_ev_reg) {
		if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
			index = stdev->partition;
		else if (index < 0 || index >= stdev->partition_count)
			return ERR_PTR(-EINVAL);
	} else if (event_regs[event_id].map_reg == pff_ev_reg) {
		if (index < 0 || index >= stdev->pff_csr_count)
			return ERR_PTR(-EINVAL);
	}

	return event_regs[event_id].map_reg(stdev, off, index);
}

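/*
 * Read an event header and its data words, report the occurred flag
 * and the 8-bit count field (bits 5-12 of the header), and apply any
 * requested clear/enable/disable flags back to the hardware.
 */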
static int event_ctl(struct switchtec_dev *stdev,
		     struct switchtec_ioctl_event_ctl *ctl)
{
	int i;
	u32 __iomem *reg;
	u32 hdr;

	reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	hdr = ioread32(reg);
	for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
		ctl->data[i] = ioread32(&reg[i + 1]);

	ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
	ctl->count = (hdr >> 5) & 0xFF;

	if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
		hdr &= ~SWITCHTEC_EVENT_CLEAR;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
		hdr |= SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
		hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
		hdr |= SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
		hdr &= ~SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
		hdr |= SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
		hdr &= ~SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
		hdr |= SWITCHTEC_EVENT_FATAL;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
		hdr &= ~SWITCHTEC_EVENT_FATAL;

	if (ctl->flags)
		iowrite32(hdr, reg);

	ctl->flags = 0;
	if (hdr & SWITCHTEC_EVENT_EN_IRQ)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
	if (hdr & SWITCHTEC_EVENT_EN_LOG)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
	if (hdr & SWITCHTEC_EVENT_EN_CLI)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
	if (hdr & SWITCHTEC_EVENT_FATAL)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;

	return 0;
}

static int ioctl_event_ctl(struct switchtec_dev *stdev,
	struct switchtec_ioctl_event_ctl __user *uctl)
{
	int ret;
	int nr_idxs;
	struct switchtec_ioctl_event_ctl ctl;

	if (copy_from_user(&ctl, uctl, sizeof(ctl)))
		return -EFAULT;

	if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return -EINVAL;

	if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
		return -EINVAL;

	if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
		if (event_regs[ctl.event_id].map_reg == global_ev_reg)
			nr_idxs = 1;
		else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
			nr_idxs = stdev->partition_count;
		else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
			nr_idxs = stdev->pff_csr_count;
		else
			return -EINVAL;

		for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
			ret = event_ctl(stdev, &ctl);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = event_ctl(stdev, &ctl);
		if (ret < 0)
			return ret;
	}

	if (copy_to_user(uctl, &ctl, sizeof(ctl)))
		return -EFAULT;

	return 0;
}

static int ioctl_pff_to_port(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port *up)
{
	int i, part;
	u32 reg;
	struct part_cfg_regs *pcfg;
	struct switchtec_ioctl_pff_port p;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	p.port = -1;
	for (part = 0; part < stdev->partition_count; part++) {
		pcfg = &stdev->mmio_part_cfg_all[part];
		p.partition = part;

		reg = ioread32(&pcfg->usp_pff_inst_id);
		if (reg == p.pff) {
			p.port = 0;
			break;
		}

		reg = ioread32(&pcfg->vep_pff_inst_id);
		if (reg == p.pff) {
			p.port = SWITCHTEC_IOCTL_PFF_VEP;
			break;
		}

		for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
			reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
			if (reg != p.pff)
				continue;

			p.port = i + 1;
			break;
		}

		if (p.port != -1)
			break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

static int ioctl_port_to_pff(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port *up)
{
	struct switchtec_ioctl_pff_port p;
	struct part_cfg_regs *pcfg;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
		pcfg = stdev->mmio_part_cfg;
	else if (p.partition < stdev->partition_count)
		pcfg = &stdev->mmio_part_cfg_all[p.partition];
	else
		return -EINVAL;

	switch (p.port) {
	case 0:
		p.pff = ioread32(&pcfg->usp_pff_inst_id);
		break;
	case SWITCHTEC_IOCTL_PFF_VEP:
		p.pff = ioread32(&pcfg->vep_pff_inst_id);
		break;
	default:
		if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
			return -EINVAL;
		p.port = array_index_nospec(p.port,
					ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
		p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
		break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;
	void __user *argp = (void __user *)arg;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	switch (cmd) {
	case SWITCHTEC_IOCTL_FLASH_INFO:
		rc = ioctl_flash_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_FLASH_PART_INFO:
		rc = ioctl_flash_part_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_SUMMARY:
		rc = ioctl_event_summary(stdev, stuser, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_CTL:
		rc = ioctl_event_ctl(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PFF_TO_PORT:
		rc = ioctl_pff_to_port(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PORT_TO_PFF:
		rc = ioctl_port_to_pff(stdev, argp);
		break;
	default:
		rc = -ENOTTY;
		break;
	}

	mutex_unlock(&stdev->mrpc_mutex);
	return rc;
}

static const struct file_operations switchtec_fops = {
	.owner = THIS_MODULE,
	.open = switchtec_dev_open,
	.release = switchtec_dev_release,
	.write = switchtec_dev_write,
	.read = switchtec_dev_read,
	.poll = switchtec_dev_poll,
	.unlocked_ioctl = switchtec_dev_ioctl,
	.compat_ioctl = switchtec_dev_ioctl,
};

static void link_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, link_event_work);

	if (stdev->link_notifier)
		stdev->link_notifier(stdev);
}

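/*
 * Compare the per-PFF link state event counters against the cached
 * values and schedule the link notifier work if any of them changed.
 */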
static void check_link_state_events(struct switchtec_dev *stdev)
{
	int idx;
	u32 reg;
	int count;
	int occurred = 0;

	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
		reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
		dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
		count = (reg >> 5) & 0xFF;

		if (count != stdev->link_event_count[idx]) {
			occurred = 1;
			stdev->link_event_count[idx] = count;
		}
	}

	if (occurred)
		schedule_work(&stdev->link_event_work);
}

static void enable_link_state_events(struct switchtec_dev *stdev)
{
	int idx;

	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
		iowrite32(SWITCHTEC_EVENT_CLEAR |
			  SWITCHTEC_EVENT_EN_IRQ,
			  &stdev->mmio_pff_csr[idx].link_state_hdr);
	}
}

static void stdev_release(struct device *dev)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	kfree(stdev);
}

static void stdev_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmpuser;

	pci_clear_master(stdev->pdev);

	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	/* Mark the hardware as unavailable and complete all completions */
	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* Wake up and kill any users waiting on an MRPC request */
	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
		complete_all(&stuser->comp);
		list_del_init(&stuser->list);
		stuser_put(stuser);
	}

	mutex_unlock(&stdev->mrpc_mutex);

	/* Wake up any users waiting on event_wq */
	wake_up_interruptible(&stdev->event_wq);
}

static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev;
	int minor;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!stdev)
		return ERR_PTR(-ENOMEM);

	stdev->alive = true;
	stdev->pdev = pdev;
	INIT_LIST_HEAD(&stdev->mrpc_queue);
	mutex_init(&stdev->mrpc_mutex);
	stdev->mrpc_busy = 0;
	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
	INIT_WORK(&stdev->link_event_work, link_event_work);
	init_waitqueue_head(&stdev->event_wq);
	atomic_set(&stdev->event_cnt, 0);

	dev = &stdev->dev;
	device_initialize(dev);
	dev->class = switchtec_class;
	dev->parent = &pdev->dev;
	dev->groups = switchtec_device_groups;
	dev->release = stdev_release;

	minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
			       GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_put;
	}

	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
	dev_set_name(dev, "switchtec%d", minor);

	cdev = &stdev->cdev;
	cdev_init(cdev, &switchtec_fops);
	cdev->owner = THIS_MODULE;

	return stdev;

err_put:
	put_device(&stdev->dev);
	return ERR_PTR(rc);
}

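/*
 * If this event has occurred with its interrupt enabled, mask it so it
 * does not keep firing; link state events are left alone because they
 * are handled directly in the ISR.
 */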
static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
{
	size_t off = event_regs[eid].offset;
	u32 __iomem *hdr_reg;
	u32 hdr;

	hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
	hdr = ioread32(hdr_reg);

	if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
		return 0;

	if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE)
		return 0;

	dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
	hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
	iowrite32(hdr, hdr_reg);

	return 1;
}

static int mask_all_events(struct switchtec_dev *stdev, int eid)
{
	int idx;
	int count = 0;

	if (event_regs[eid].map_reg == part_ev_reg) {
		for (idx = 0; idx < stdev->partition_count; idx++)
			count += mask_event(stdev, eid, idx);
	} else if (event_regs[eid].map_reg == pff_ev_reg) {
		for (idx = 0; idx < stdev->pff_csr_count; idx++) {
			if (!stdev->pff_local[idx])
				continue;

			count += mask_event(stdev, eid, idx);
		}
	} else {
		count += mask_event(stdev, eid, 0);
	}

	return count;
}

static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	u32 reg;
	irqreturn_t ret = IRQ_NONE;
	int eid, event_count = 0;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
		ret = IRQ_HANDLED;
		schedule_work(&stdev->mrpc_work);
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
	}

	check_link_state_events(stdev);

	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
		event_count += mask_all_events(stdev, eid);

	if (event_count) {
		atomic_inc(&stdev->event_cnt);
		wake_up_interruptible(&stdev->event_wq);
		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
			event_count);
		return IRQ_HANDLED;
	}

	return ret;
}

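/*
 * Allocate MSI/MSI-X vectors and request the event interrupt on the
 * vector number advertised by the firmware in vep_vector_number.
 */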
static int switchtec_init_isr(struct switchtec_dev *stdev)
{
	int nvecs;
	int event_irq;

	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvecs < 0)
		return nvecs;

	event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
	if (event_irq < 0 || event_irq >= nvecs)
		return -EFAULT;

	event_irq = pci_irq_vector(stdev->pdev, event_irq);
	if (event_irq < 0)
		return event_irq;

	return devm_request_irq(&stdev->pdev->dev, event_irq,
				switchtec_event_isr, 0,
				KBUILD_MODNAME, stdev);
}

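/*
 * Count the implemented PFF CSR blocks (terminated by a non-Microsemi
 * vendor ID) and record which of them belong to the local partition.
 */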
static void init_pff(struct switchtec_dev *stdev)
{
	int i;
	u32 reg;
	struct part_cfg_regs *pcfg = stdev->mmio_part_cfg;

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != PCI_VENDOR_ID_MICROSEMI)
			break;
	}

	stdev->pff_csr_count = i;

	reg = ioread32(&pcfg->usp_pff_inst_id);
	if (reg < SWITCHTEC_MAX_PFF_CSR)
		stdev->pff_local[reg] = 1;

	reg = ioread32(&pcfg->vep_pff_inst_id);
	if (reg < SWITCHTEC_MAX_PFF_CSR)
		stdev->pff_local[reg] = 1;

	for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
		reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
		if (reg < SWITCHTEC_MAX_PFF_CSR)
			stdev->pff_local[reg] = 1;
	}
}

static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME);
	if (rc)
		return rc;

	pci_set_master(pdev);

	stdev->mmio = pcim_iomap_table(pdev)[0];
	stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
	stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

	if (stdev->partition_count < 1)
		stdev->partition_count = 1;

	init_pff(stdev);

	pci_set_drvdata(pdev, stdev);

	return 0;
}

static int switchtec_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct switchtec_dev *stdev;
	int rc;

	if (pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8))
		request_module_nowait("ntb_hw_switchtec");

	stdev = stdev_create(pdev);
	if (IS_ERR(stdev))
		return PTR_ERR(stdev);

	rc = switchtec_init_pci(stdev, pdev);
	if (rc)
		goto err_put;

	rc = switchtec_init_isr(stdev);
	if (rc) {
		dev_err(&stdev->dev, "failed to init isr.\n");
		goto err_put;
	}

	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	enable_link_state_events(stdev);

	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
	if (rc)
		goto err_devadd;

	dev_info(&stdev->dev, "Management device registered.\n");

	return 0;

err_devadd:
	stdev_kill(stdev);
err_put:
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	put_device(&stdev->dev);
	return rc;
}

static void switchtec_pci_remove(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	cdev_device_del(&stdev->cdev, &stdev->dev);
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");

	stdev_kill(stdev);
	put_device(&stdev->dev);
}

#define SWITCHTEC_PCI_DEVICE(device_id) \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_MEMORY_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
	}, \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_BRIDGE_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
	}

static const struct pci_device_id switchtec_pci_tbl[] = {
	SWITCHTEC_PCI_DEVICE(0x8531),  //PFX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8532),  //PFX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8533),  //PFX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8534),  //PFX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8535),  //PFX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8536),  //PFX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8541),  //PSX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8542),  //PSX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8543),  //PSX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8544),  //PSX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8545),  //PSX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8546),  //PSX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8551),  //PAX 24XG3
	SWITCHTEC_PCI_DEVICE(0x8552),  //PAX 32XG3
	SWITCHTEC_PCI_DEVICE(0x8553),  //PAX 48XG3
	SWITCHTEC_PCI_DEVICE(0x8554),  //PAX 64XG3
	SWITCHTEC_PCI_DEVICE(0x8555),  //PAX 80XG3
	SWITCHTEC_PCI_DEVICE(0x8556),  //PAX 96XG3
	SWITCHTEC_PCI_DEVICE(0x8561),  //PFXL 24XG3
	SWITCHTEC_PCI_DEVICE(0x8562),  //PFXL 32XG3
	SWITCHTEC_PCI_DEVICE(0x8563),  //PFXL 48XG3
	SWITCHTEC_PCI_DEVICE(0x8564),  //PFXL 64XG3
	SWITCHTEC_PCI_DEVICE(0x8565),  //PFXL 80XG3
	SWITCHTEC_PCI_DEVICE(0x8566),  //PFXL 96XG3
	SWITCHTEC_PCI_DEVICE(0x8571),  //PFXI 24XG3
	SWITCHTEC_PCI_DEVICE(0x8572),  //PFXI 32XG3
	SWITCHTEC_PCI_DEVICE(0x8573),  //PFXI 48XG3
	SWITCHTEC_PCI_DEVICE(0x8574),  //PFXI 64XG3
	SWITCHTEC_PCI_DEVICE(0x8575),  //PFXI 80XG3
	SWITCHTEC_PCI_DEVICE(0x8576),  //PFXI 96XG3
	{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);

static struct pci_driver switchtec_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= switchtec_pci_tbl,
	.probe		= switchtec_pci_probe,
	.remove		= switchtec_pci_remove,
};

static int __init switchtec_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
				 "switchtec");
	if (rc)
		return rc;

	switchtec_class = class_create(THIS_MODULE, "switchtec");
	if (IS_ERR(switchtec_class)) {
		rc = PTR_ERR(switchtec_class);
		goto err_create_class;
	}

	rc = pci_register_driver(&switchtec_pci_driver);
	if (rc)
		goto err_pci_register;

	pr_info(KBUILD_MODNAME ": loaded.\n");

	return 0;

err_pci_register:
	class_destroy(switchtec_class);

err_create_class:
	unregister_chrdev_region(switchtec_devt, max_devices);

	return rc;
}
module_init(switchtec_init);

static void __exit switchtec_exit(void)
{
	pci_unregister_driver(&switchtec_pci_driver);
	class_destroy(switchtec_class);
	unregister_chrdev_region(switchtec_devt, max_devices);
	ida_destroy(&switchtec_minor_ida);

	pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);