/* visorchipset.c
 *
 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#include "channel_guid.h"
#include "controlvmchannel.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"
#include "periodic_work.h"
#include "version.h"
#include "visorbus.h"
#include "visorbus_private.h"
#include "vmcallinterface.h"

#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE   50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST   1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)

#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET     0x00000000

#define UNISYS_SPAR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_SPAR_ID_EBX 0x73696e55
#define UNISYS_SPAR_ID_ECX 0x70537379
#define UNISYS_SPAR_ID_EDX 0x34367261

/*
 * Module parameters
 */
static int visorchipset_major;
static int visorchipset_visorbusregwait = 1;    /* default is on */
static int visorchipset_holdchipsetready;
static unsigned long controlvm_payload_bytes_buffered;
static u32 dump_vhba_bus;

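/* Character-device open handler for /dev/visorchipset; only minor number 0
 * is valid, and no per-file state is kept.
 */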
static int
visorchipset_open(struct inode *inode, struct file *file)
{
        unsigned int minor_number = iminor(inode);

        if (minor_number)
                return -ENODEV;
        file->private_data = NULL;
        return 0;
}

static int
visorchipset_release(struct inode *inode, struct file *file)
{
        return 0;
}

/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode.  As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
/* jiffies timestamp of the most recent controlvm message */
static unsigned long most_recent_message_jiffies;
static int visorbusregistered;

#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

struct parser_context {
        unsigned long allocbytes;
        unsigned long param_bytes;
        u8 *curr;
        unsigned long bytes_remaining;
        bool byte_stream;
        char data[];
};

static struct delayed_work periodic_controlvm_work;
static DEFINE_SEMAPHORE(notifier_lock);

static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
static struct controlvm_message_header g_chipset_msg_hdr;
static struct controlvm_message_packet g_devicechangestate_packet;

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;

/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
        u8 *ptr;                /* pointer to base address of payload pool */
        u64 offset;             /* offset from beginning of controlvm
                                 * channel to beginning of payload pool
                                 */
        u32 bytes;              /* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;

/* The following globals are used to handle the scenario where we are unable
 * to offload the payload from a controlvm message due to memory requirements.
 * In this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;

/* This identifies a data buffer that has been received via a controlvm
 * message in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
        struct list_head next;  /* putfile_buffer_entry list */
        struct parser_context *parser_ctx; /* points to input data buffer */
};

/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
        /* a payload from a controlvm message, containing a file data buffer */
        struct parser_context *parser_ctx;
        /* points within data area of parser_ctx to next byte of data */
        u8 *pnext;
        /* # bytes left from <pnext> to the end of this data buffer */
        size_t bytes_remaining;
};

#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * putfile_request_list.
 */
struct putfile_request {
        u64 sig;                /* PUTFILE_REQUEST_SIG */

        /* header from original TransmitFile request */
        struct controlvm_message_header controlvm_header;
        u64 file_request_number;        /* from original TransmitFile request */

        /* link to next struct putfile_request */
        struct list_head next_putfile_request;

        /* most-recent sequence number supplied via a controlvm message */
        u64 data_sequence_number;

        /* head of putfile_buffer_entry list, which describes the data to be
         * supplied as putfile data;
         * - this list is added to when controlvm messages come in that supply
         * file data
         * - this list is removed from via the hotplug program that is actually
         * consuming these buffers to write as file data
         */
        struct list_head input_buffer_list;
        spinlock_t req_list_lock;       /* lock for input_buffer_list */

        /* waiters for input_buffer_list to go non-empty */
        wait_queue_head_t input_buffer_wq;

        /* data not yet read within current putfile_buffer_entry */
        struct putfile_active_buffer active_buf;

        /* <0 = failed, 0 = in-progress, >0 = successful;
         * note that this must be set with req_list_lock, and if you set <0,
         * it is your responsibility to also free up all of the other objects
         * in this struct (like input_buffer_list, active_buf.parser_ctx)
         * before releasing the lock
         */
        int completion_status;
};

struct parahotplug_request {
        struct list_head list;
        int id;
        unsigned long expiration;
        struct controlvm_message msg;
};

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);  /* lock for above */
static void parahotplug_process_list(void);

/* The notifier functions registered by the visorbus driver, invoked as
 * bus- and device-related controlvm messages are processed.
 */
static struct visorchipset_busdev_notifiers busdev_notifiers;

static void bus_create_response(struct visor_device *p, int response);
static void bus_destroy_response(struct visor_device *p, int response);
static void device_create_response(struct visor_device *p, int response);
static void device_destroy_response(struct visor_device *p, int response);
static void device_resume_response(struct visor_device *p, int response);

static void visorchipset_device_pause_response(struct visor_device *p,
                                               int response);

static struct visorchipset_busdev_responders busdev_responders = {
        .bus_create = bus_create_response,
        .bus_destroy = bus_destroy_response,
        .device_create = device_create_response,
        .device_destroy = device_destroy_response,
        .device_pause = visorchipset_device_pause_response,
        .device_resume = device_resume_response,
};

/* info for /dev/visorchipset */
static dev_t major_dev = -1; /* indicates major num for device */

/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
                               struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
                               struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
                                struct device_attribute *attr, const char *buf,
                                size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
                          char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
                           char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
                                    struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
        &dev_attr_toolaction.attr,
        &dev_attr_boottotool.attr,
        &dev_attr_error.attr,
        &dev_attr_textid.attr,
        &dev_attr_remaining_steps.attr,
        NULL
};

static struct attribute_group visorchipset_install_group = {
        .name = "install",
        .attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
        &dev_attr_chipsetready.attr,
        NULL
};

static struct attribute_group visorchipset_guest_group = {
        .name = "guest",
        .attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
        &dev_attr_devicedisabled.attr,
        &dev_attr_deviceenabled.attr,
        NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
        .name = "parahotplug",
        .attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
        &visorchipset_install_group,
        &visorchipset_guest_group,
        &visorchipset_parahotplug_group,
        NULL
};

static void visorchipset_dev_release(struct device *dev)
{
}

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
        .name = "visorchipset",
        .id = -1,
        .dev.groups = visorchipset_dev_groups,
        .dev.release = visorchipset_dev_release,
};

/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msg_hdr,
                              int response);
static void controlvm_respond_chipset_init(
                struct controlvm_message_header *msg_hdr, int response,
                enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
                struct controlvm_message_header *msg_hdr, int response,
                struct spar_segment_state state);

static void parser_done(struct parser_context *ctx);

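/* Allocate a parser_context and copy <bytes> bytes of controlvm message
 * payload into it, either from local memory (via __va()) or by temporarily
 * mapping the guest-physical region with memremap().  On success the
 * buffered byte count is charged against MAX_CONTROLVM_PAYLOAD_BYTES; when
 * that budget is exceeded (or the kzalloc() fails), *retry is set so the
 * caller can re-attempt the message later.  Returns the new context, or
 * NULL on error.
 */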
static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
        int allocbytes = sizeof(struct parser_context) + bytes;
        struct parser_context *rc = NULL;
        struct parser_context *ctx = NULL;

        if (retry)
                *retry = false;

        /*
         * allocate an extra byte to ensure the payload is
         * '\0'-terminated
         */
        allocbytes++;
        if ((controlvm_payload_bytes_buffered + bytes)
            > MAX_CONTROLVM_PAYLOAD_BYTES) {
                if (retry)
                        *retry = true;
                rc = NULL;
                goto cleanup;
        }
        ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
        if (!ctx) {
                if (retry)
                        *retry = true;
                rc = NULL;
                goto cleanup;
        }

        ctx->allocbytes = allocbytes;
        ctx->param_bytes = bytes;
        ctx->curr = NULL;
        ctx->bytes_remaining = 0;
        ctx->byte_stream = false;
        if (local) {
                void *p;

                if (addr > virt_to_phys(high_memory - 1)) {
                        rc = NULL;
                        goto cleanup;
                }
                p = __va((unsigned long)(addr));
                memcpy(ctx->data, p, bytes);
        } else {
                void *mapping = memremap(addr, bytes, MEMREMAP_WB);

                if (!mapping) {
                        rc = NULL;
                        goto cleanup;
                }
                memcpy(ctx->data, mapping, bytes);
                memunmap(mapping);
        }

        ctx->byte_stream = true;
        rc = ctx;
cleanup:
        if (rc) {
                controlvm_payload_bytes_buffered += ctx->param_bytes;
        } else if (ctx) {
                parser_done(ctx);
                ctx = NULL;
        }
        return rc;
}

static uuid_le
parser_id_get(struct parser_context *ctx)
{
        struct spar_controlvm_parameters_header *phdr = NULL;

        if (!ctx)
                return NULL_UUID_LE;
        phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
        return phdr->id;
}

/* Identifies which of the strings within a controlvm parameters payload
 * is to be parsed.
 */
enum PARSER_WHICH_STRING {
        PARSERSTRING_INITIATOR,
        PARSERSTRING_TARGET,
        PARSERSTRING_CONNECTION,
        PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
};

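/* Position the parser at one of the strings (initiator, target, connection,
 * or name) described by the spar_controlvm_parameters_header at the start
 * of the payload, so that a subsequent parser_string_get() extracts it.
 */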
static void
parser_param_start(struct parser_context *ctx,
                   enum PARSER_WHICH_STRING which_string)
{
        struct spar_controlvm_parameters_header *phdr = NULL;

        if (!ctx)
                return;

        phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
        switch (which_string) {
        case PARSERSTRING_INITIATOR:
                ctx->curr = ctx->data + phdr->initiator_offset;
                ctx->bytes_remaining = phdr->initiator_length;
                break;
        case PARSERSTRING_TARGET:
                ctx->curr = ctx->data + phdr->target_offset;
                ctx->bytes_remaining = phdr->target_length;
                break;
        case PARSERSTRING_CONNECTION:
                ctx->curr = ctx->data + phdr->connection_offset;
                ctx->bytes_remaining = phdr->connection_length;
                break;
        case PARSERSTRING_NAME:
                ctx->curr = ctx->data + phdr->name_offset;
                ctx->bytes_remaining = phdr->name_length;
                break;
        default:
                break;
        }
}

static void parser_done(struct parser_context *ctx)
{
        if (!ctx)
                return;
        controlvm_payload_bytes_buffered -= ctx->param_bytes;
        kfree(ctx);
}

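/* Return a kmalloc()ed, '\0'-terminated copy of the current parameter
 * string (at most bytes_remaining bytes); the caller is responsible for
 * kfree()ing it.  Returns NULL if there is no current string or the
 * allocation fails.
 */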
static void *
parser_string_get(struct parser_context *ctx)
{
        u8 *pscan;
        unsigned long nscan;
        int value_length = -1;
        void *value = NULL;
        int i;

        if (!ctx)
                return NULL;
        pscan = ctx->curr;
        nscan = ctx->bytes_remaining;
        if (nscan == 0)
                return NULL;
        if (!pscan)
                return NULL;
        for (i = 0, value_length = -1; i < nscan; i++)
                if (pscan[i] == '\0') {
                        value_length = i;
                        break;
                }
        if (value_length < 0)   /* '\0' was not included in the length */
                value_length = nscan;
        value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
        if (!value)
                return NULL;
        if (value_length > 0)
                memcpy(value, pscan, value_length);
        ((u8 *)(value))[value_length] = '\0';
        return value;
}

static ssize_t toolaction_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        u8 tool_action;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   tool_action), &tool_action, sizeof(u8));
        return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        u8 tool_action;
        int ret;

        if (kstrtou8(buf, 10, &tool_action))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         tool_action),
                &tool_action, sizeof(u8));

        if (ret)
                return ret;
        return count;
}

static ssize_t boottotool_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct efi_spar_indication efi_spar_indication;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   efi_spar_ind), &efi_spar_indication,
                          sizeof(struct efi_spar_indication));
        return scnprintf(buf, PAGE_SIZE, "%u\n",
                         efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        int val, ret;
        struct efi_spar_indication efi_spar_indication;

        if (kstrtoint(buf, 10, &val))
                return -EINVAL;

        efi_spar_indication.boot_to_tool = val;
        ret = visorchannel_write(controlvm_channel,
                        offsetof(struct spar_controlvm_channel_protocol,
                                 efi_spar_ind), &(efi_spar_indication),
                                 sizeof(struct efi_spar_indication));

        if (ret)
                return ret;
        return count;
}

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        u32 error;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_error),
                          &error, sizeof(u32));
        return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        u32 error;
        int ret;

        if (kstrtou32(buf, 10, &error))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         installation_error),
                &error, sizeof(u32));
        if (ret)
                return ret;
        return count;
}

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        u32 text_id;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_text_id),
                          &text_id, sizeof(u32));
        return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
{
        u32 text_id;
        int ret;

        if (kstrtou32(buf, 10, &text_id))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         installation_text_id),
                &text_id, sizeof(u32));
        if (ret)
                return ret;
        return count;
}

static ssize_t remaining_steps_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        u16 remaining_steps;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_remaining_steps),
                          &remaining_steps, sizeof(u16));
        return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        u16 remaining_steps;
        int ret;

        if (kstrtou16(buf, 10, &remaining_steps))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         installation_remaining_steps),
                &remaining_steps, sizeof(u16));
        if (ret)
                return ret;
        return count;
}

struct visor_busdev {
        u32 bus_no;
        u32 dev_no;
};

static int match_visorbus_dev_by_id(struct device *dev, void *data)
{
        struct visor_device *vdev = to_visor_device(dev);
        struct visor_busdev *id = data;
        u32 bus_no = id->bus_no;
        u32 dev_no = id->dev_no;

        if ((vdev->chipset_bus_no == bus_no) &&
            (vdev->chipset_dev_no == dev_no))
                return 1;

        return 0;
}

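/**
 * visorbus_get_device_by_id() - find the visor_device with the given
 *                               bus and device numbers
 * @bus_no: chipset bus number to match
 * @dev_no: chipset device number to match (BUS_ROOT_DEVICE for the bus
 *          device itself)
 * @from:   if non-NULL, the search resumes after this device
 *
 * Return: the matching visor_device, or NULL if none is found.
 */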
struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
                                               struct visor_device *from)
{
        struct device *dev;
        struct device *dev_start = NULL;
        struct visor_device *vdev = NULL;
        struct visor_busdev id = {
                        .bus_no = bus_no,
                        .dev_no = dev_no
                };

        if (from)
                dev_start = &from->device;
        dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
                              match_visorbus_dev_by_id);
        if (dev)
                vdev = to_visor_device(dev);
        return vdev;
}
EXPORT_SYMBOL(visorbus_get_device_by_id);

static u8
check_chipset_events(void)
{
        int i;
        u8 send_msg = 1;

        /* Check events to determine if response should be sent */
        for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
                send_msg &= chipset_events[i];
        return send_msg;
}

static void
clear_chipset_events(void)
{
        int i;

        /* Clear chipset_events */
        for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
                chipset_events[i] = 0;
}

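/**
 * visorchipset_register_busdev() - register or unregister the visorbus
 *                                  driver's notifier callbacks
 * @notifiers:   callbacks to invoke as bus/device messages arrive, or NULL
 *               to unregister
 * @responders:  if non-NULL, filled in with the responder functions the
 *               callbacks must use to complete each message
 * @driver_info: if non-NULL, filled in with this driver's identity
 */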
void
visorchipset_register_busdev(
                        struct visorchipset_busdev_notifiers *notifiers,
                        struct visorchipset_busdev_responders *responders,
                        struct ultra_vbus_deviceinfo *driver_info)
{
        down(&notifier_lock);
        if (!notifiers) {
                memset(&busdev_notifiers, 0,
                       sizeof(busdev_notifiers));
                visorbusregistered = 0; /* clear flag */
        } else {
                busdev_notifiers = *notifiers;
                visorbusregistered = 1; /* set flag */
        }
        if (responders)
                *responders = busdev_responders;
        if (driver_info)
                bus_device_info_init(driver_info, "chipset", "visorchipset",
                                     VERSION, NULL);

        up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev);

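/* Handle CONTROLVM_CHIPSET_INIT: may only run once; negotiates the
 * parahotplug feature and sets ULTRA_CHIPSET_FEATURE_REPLY so Command
 * knows this is a features-aware driver.
 */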
static void
chipset_init(struct controlvm_message *inmsg)
{
        static int chipset_inited;
        enum ultra_chipset_feature features = 0;
        int rc = CONTROLVM_RESP_SUCCESS;

        POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
        if (chipset_inited) {
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto cleanup;
        }
        chipset_inited = 1;
        POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

        /* Set features to indicate we support parahotplug (if Command
         * also supports it).
         */
        features = inmsg->cmd.init_chipset.features &
                   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

        /* Set the "reply" bit so Command knows this is a
         * features-aware driver.
         */
        features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
        if (inmsg->hdr.flags.response_expected)
                controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}

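/* Initialize a response message by copying the request's header and
 * clearing its payload fields; a negative response code marks the
 * message failed and records the completion status.
 */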
static void
controlvm_init_response(struct controlvm_message *msg,
                        struct controlvm_message_header *msg_hdr, int response)
{
        memset(msg, 0, sizeof(struct controlvm_message));
        memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
        msg->hdr.payload_bytes = 0;
        msg->hdr.payload_vm_offset = 0;
        msg->hdr.payload_max_bytes = 0;
        if (response < 0) {
                msg->hdr.flags.failed = 1;
                msg->hdr.completion_status = (u32)(-response);
        }
}

static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        if (outmsg.hdr.flags.test_message == 1)
                return;

        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg))
                return;
}

static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
                               int response,
                               enum ultra_chipset_feature features)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        outmsg.cmd.init_chipset.features = features;
        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg))
                return;
}

static void controlvm_respond_physdev_changestate(
                struct controlvm_message_header *msg_hdr, int response,
                struct spar_segment_state state)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        outmsg.cmd.device_change_state.state = state;
        outmsg.cmd.device_change_state.flags.phys_device = 1;
        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg))
                return;
}

enum crash_obj_type {
        CRASH_DEV,
        CRASH_BUS,
};

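/* Stash a copy of a bus-create or device-create controlvm message in the
 * saved-crash-message area of the controlvm channel (bus message first,
 * device message immediately after it), so the dump bus/device can
 * presumably be recreated after a crash.
 */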
static void
save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
{
        u32 local_crash_msg_offset;
        u16 local_crash_msg_count;

        if (visorchannel_read(controlvm_channel,
                              offsetof(struct spar_controlvm_channel_protocol,
                                       saved_crash_message_count),
                              &local_crash_msg_count, sizeof(u16)) < 0) {
                POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }

        if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
                POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
                                 local_crash_msg_count,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }

        if (visorchannel_read(controlvm_channel,
                              offsetof(struct spar_controlvm_channel_protocol,
                                       saved_crash_message_offset),
                              &local_crash_msg_offset, sizeof(u32)) < 0) {
                POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }

        if (typ == CRASH_BUS) {
                if (visorchannel_write(controlvm_channel,
                                       local_crash_msg_offset,
                                       msg,
                                       sizeof(struct controlvm_message)) < 0) {
                        POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
                                         POSTCODE_SEVERITY_ERR);
                        return;
                }
        } else {
                local_crash_msg_offset += sizeof(struct controlvm_message);
                if (visorchannel_write(controlvm_channel,
                                       local_crash_msg_offset,
                                       msg,
                                       sizeof(struct controlvm_message)) < 0) {
                        POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
                                         POSTCODE_SEVERITY_ERR);
                        return;
                }
        }
}

static void
bus_responder(enum controlvm_id cmd_id,
              struct controlvm_message_header *pending_msg_hdr,
              int response)
{
        if (!pending_msg_hdr)
                return;         /* no controlvm response needed */

        if (pending_msg_hdr->id != (u32)cmd_id)
                return;

        controlvm_respond(pending_msg_hdr, response);
}

static void
device_changestate_responder(enum controlvm_id cmd_id,
                             struct visor_device *p, int response,
                             struct spar_segment_state response_state)
{
        struct controlvm_message outmsg;
        u32 bus_no = p->chipset_bus_no;
        u32 dev_no = p->chipset_dev_no;

        if (!p->pending_msg_hdr)
                return;         /* no controlvm response needed */
        if (p->pending_msg_hdr->id != cmd_id)
                return;

        controlvm_init_response(&outmsg, p->pending_msg_hdr, response);

        outmsg.cmd.device_change_state.bus_no = bus_no;
        outmsg.cmd.device_change_state.dev_no = dev_no;
        outmsg.cmd.device_change_state.state = response_state;

        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg))
                return;
}

static void
device_responder(enum controlvm_id cmd_id,
                 struct controlvm_message_header *pending_msg_hdr,
                 int response)
{
        if (!pending_msg_hdr)
                return;         /* no controlvm response needed */

        if (pending_msg_hdr->id != (u32)cmd_id)
                return;

        controlvm_respond(pending_msg_hdr, response);
}

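/* Complete processing of a bus-create/destroy message: record the pending
 * response header, invoke the registered visorbus notifier on success,
 * and otherwise respond to the message directly.
 */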
static void
bus_epilog(struct visor_device *bus_info,
           u32 cmd, struct controlvm_message_header *msg_hdr,
           int response, bool need_response)
{
        bool notified = false;
        struct controlvm_message_header *pmsg_hdr = NULL;

        if (!bus_info) {
                /* relying on a valid passed-in response code; re-use
                 * msg_hdr for this failure path
                 */
                pmsg_hdr = msg_hdr;
                goto away;
        }

        if (bus_info->pending_msg_hdr) {
                /* only non-NULL if dev is still waiting on a response */
                response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
                pmsg_hdr = bus_info->pending_msg_hdr;
                goto away;
        }

        if (need_response) {
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                        goto away;
                }

                memcpy(pmsg_hdr, msg_hdr,
                       sizeof(struct controlvm_message_header));
                bus_info->pending_msg_hdr = pmsg_hdr;
        }

        down(&notifier_lock);
        if (response == CONTROLVM_RESP_SUCCESS) {
                switch (cmd) {
                case CONTROLVM_BUS_CREATE:
                        if (busdev_notifiers.bus_create) {
                                (*busdev_notifiers.bus_create) (bus_info);
                                notified = true;
                        }
                        break;
                case CONTROLVM_BUS_DESTROY:
                        if (busdev_notifiers.bus_destroy) {
                                (*busdev_notifiers.bus_destroy) (bus_info);
                                notified = true;
                        }
                        break;
                }
        }
away:
        /* If a callback was notified, it is responsible for calling the
         * appropriate visorchipset_busdev_responders function, which will
         * call bus_responder() and kfree() pmsg_hdr there.  Otherwise
         * respond here; do not kfree(pmsg_hdr) on this failure path.
         */
        if (!notified)
                bus_responder(cmd, pmsg_hdr, response);
        up(&notifier_lock);
}

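/* Complete processing of a device create/destroy/changestate message:
 * record the pending response header and either hand the device to the
 * registered visorbus notifier or respond to the message directly.
 */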
static void
device_epilog(struct visor_device *dev_info,
              struct spar_segment_state state, u32 cmd,
              struct controlvm_message_header *msg_hdr, int response,
              bool need_response, bool for_visorbus)
{
        struct visorchipset_busdev_notifiers *notifiers;
        bool notified = false;
        struct controlvm_message_header *pmsg_hdr = NULL;

        notifiers = &busdev_notifiers;

        if (!dev_info) {
                /* relying on a valid passed-in response code; re-use
                 * msg_hdr for this failure path
                 */
                pmsg_hdr = msg_hdr;
                goto away;
        }

        if (dev_info->pending_msg_hdr) {
                /* only non-NULL if dev is still waiting on a response */
                response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
                pmsg_hdr = dev_info->pending_msg_hdr;
                goto away;
        }

        if (need_response) {
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                        goto away;
                }

                memcpy(pmsg_hdr, msg_hdr,
                       sizeof(struct controlvm_message_header));
                dev_info->pending_msg_hdr = pmsg_hdr;
        }

        down(&notifier_lock);
        if (response >= 0) {
                switch (cmd) {
                case CONTROLVM_DEVICE_CREATE:
                        if (notifiers->device_create) {
                                (*notifiers->device_create) (dev_info);
                                notified = true;
                        }
                        break;
                case CONTROLVM_DEVICE_CHANGESTATE:
                        /* ServerReady / ServerRunning / SegmentStateRunning */
                        if (state.alive == segment_state_running.alive &&
                            state.operating ==
                                segment_state_running.operating) {
                                if (notifiers->device_resume) {
                                        (*notifiers->device_resume) (dev_info);
                                        notified = true;
                                }
                        }
                        /* ServerNotReady / ServerLost / SegmentStateStandby */
                        else if (state.alive == segment_state_standby.alive &&
                                 state.operating ==
                                 segment_state_standby.operating) {
                                /* technically this is standby case
                                 * where server is lost
                                 */
                                if (notifiers->device_pause) {
                                        (*notifiers->device_pause) (dev_info);
                                        notified = true;
                                }
                        }
                        break;
                case CONTROLVM_DEVICE_DESTROY:
                        if (notifiers->device_destroy) {
                                (*notifiers->device_destroy) (dev_info);
                                notified = true;
                        }
                        break;
                }
        }
away:
        /* If a callback was notified, it is responsible for calling the
         * appropriate visorchipset_busdev_responders function, which will
         * call device_responder() and kfree() pmsg_hdr there.  Otherwise
         * respond here; do not kfree(pmsg_hdr) on this failure path.
         */
        if (!notified)
                device_responder(cmd, pmsg_hdr, response);
        up(&notifier_lock);
}

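/* Handle CONTROLVM_BUS_CREATE: allocate a visor_device for the new bus and
 * create its visorchannel; for the SIOVM bus, also remember it as the dump
 * vhba bus and save the message for crash handling.
 */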
static void
bus_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->create_bus.bus_no;
        int rc = CONTROLVM_RESP_SUCCESS;
        struct visor_device *bus_info;
        struct visorchannel *visorchannel;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (bus_info && (bus_info->state.created == 1)) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto cleanup;
        }
        bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
        if (!bus_info) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                goto cleanup;
        }

        INIT_LIST_HEAD(&bus_info->list_all);
        bus_info->chipset_bus_no = bus_no;
        bus_info->chipset_dev_no = BUS_ROOT_DEVICE;

        POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

        visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
                                           cmd->create_bus.channel_bytes,
                                           GFP_KERNEL,
                                           cmd->create_bus.bus_data_type_uuid);

        if (!visorchannel) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                kfree(bus_info);
                bus_info = NULL;
                goto cleanup;
        }
        bus_info->visorchannel = visorchannel;
        if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) {
                dump_vhba_bus = bus_no;
                save_crash_message(inmsg, CRASH_BUS);
        }

        POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
        bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->destroy_bus.bus_no;
        struct visor_device *bus_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info)
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        else if (bus_info->state.created == 0)
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

        bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);

        /* bus_info is freed as part of the busdevice_release function */
}

static void
bus_configure(struct controlvm_message *inmsg,
              struct parser_context *parser_ctx)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no;
        struct visor_device *bus_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        bus_no = cmd->configure_bus.bus_no;
        POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
                         POSTCODE_SEVERITY_INFO);

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        } else if (bus_info->state.created == 0) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        } else if (bus_info->pending_msg_hdr) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
        } else {
                visorchannel_set_clientpartition(bus_info->visorchannel,
                                cmd->configure_bus.guest_handle);
                bus_info->partition_uuid = parser_id_get(parser_ctx);
                parser_param_start(parser_ctx, PARSERSTRING_NAME);
                bus_info->name = parser_string_get(parser_ctx);

                POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
                                 POSTCODE_SEVERITY_INFO);
        }
        bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);
}

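/* Handle CONTROLVM_DEVICE_CREATE: validate the owning bus, allocate the
 * visor_device, and create its (locked) visorchannel; vhba devices also
 * have the message saved for crash handling.
 */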
static void
my_device_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->create_device.bus_no;
        u32 dev_no = cmd->create_device.dev_no;
        struct visor_device *dev_info = NULL;
        struct visor_device *bus_info;
        struct visorchannel *visorchannel;
        int rc = CONTROLVM_RESP_SUCCESS;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
                goto cleanup;
        }

        if (bus_info->state.created == 0) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
                goto cleanup;
        }

        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (dev_info && (dev_info->state.created == 1)) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto cleanup;
        }

        dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
        if (!dev_info) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                goto cleanup;
        }

        dev_info->chipset_bus_no = bus_no;
        dev_info->chipset_dev_no = dev_no;
        dev_info->inst = cmd->create_device.dev_inst_uuid;

        /* not sure where the best place is to set the 'parent' */
        dev_info->device.parent = &bus_info->device;

        POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
                         POSTCODE_SEVERITY_INFO);

        visorchannel =
               visorchannel_create_with_lock(cmd->create_device.channel_addr,
                                             cmd->create_device.channel_bytes,
                                             GFP_KERNEL,
                                             cmd->create_device.data_type_uuid);

        if (!visorchannel) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                kfree(dev_info);
                dev_info = NULL;
                goto cleanup;
        }
        dev_info->visorchannel = visorchannel;
        dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
        if (uuid_le_cmp(cmd->create_device.data_type_uuid,
                        spar_vhba_channel_protocol_uuid) == 0)
                save_crash_message(inmsg, CRASH_DEV);

        POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
                         POSTCODE_SEVERITY_INFO);
cleanup:
        device_epilog(dev_info, segment_state_running,
                      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
                      inmsg->hdr.flags.response_expected == 1, 1);
}

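/* Handle CONTROLVM_DEVICE_CHANGESTATE: look up the device and let
 * device_epilog() translate the requested segment state into a pause or
 * resume notification.
 */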
static void
my_device_changestate(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->device_change_state.bus_no;
        u32 dev_no = cmd->device_change_state.dev_no;
        struct spar_segment_state state = cmd->device_change_state.state;
        struct visor_device *dev_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (!dev_info) {
                POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        } else if (dev_info->state.created == 0) {
                POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        }
        if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
                device_epilog(dev_info, state,
                              CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
                              inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->destroy_device.bus_no;
        u32 dev_no = cmd->destroy_device.dev_no;
        struct visor_device *dev_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (!dev_info)
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        else if (dev_info->state.created == 0)
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

        if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
                device_epilog(dev_info, segment_state_running,
                              CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
                              inmsg->hdr.flags.response_expected == 1, 1);
}

/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS, or a
 * negative CONTROLVM_RESP_ERROR code on failure.
 */
static int
initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
                                  struct visor_controlvm_payload_info *info)
{
        u8 *payload = NULL;
        int rc = CONTROLVM_RESP_SUCCESS;

        if (!info) {
                rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
                goto cleanup;
        }
        memset(info, 0, sizeof(struct visor_controlvm_payload_info));
        if ((offset == 0) || (bytes == 0)) {
                rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
                goto cleanup;
        }
        payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB);
        if (!payload) {
                rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
                goto cleanup;
        }

        info->offset = offset;
        info->bytes = bytes;
        info->ptr = payload;

cleanup:
        if (rc < 0) {
                if (payload) {
                        memunmap(payload);
                        payload = NULL;
                }
        }
        return rc;
}

1416 static void
1417 destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
1418 {
1419         if (info->ptr) {
1420                 memunmap(info->ptr);
1421                 info->ptr = NULL;
1422         }
1423         memset(info, 0, sizeof(struct visor_controlvm_payload_info));
1424 }
1425
1426 static void
1427 initialize_controlvm_payload(void)
1428 {
1429         u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
1430         u64 payload_offset = 0;
1431         u32 payload_bytes = 0;
1432
1433         if (visorchannel_read(controlvm_channel,
1434                               offsetof(struct spar_controlvm_channel_protocol,
1435                                        request_payload_offset),
1436                               &payload_offset, sizeof(payload_offset)) < 0) {
1437                 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1438                                  POSTCODE_SEVERITY_ERR);
1439                 return;
1440         }
1441         if (visorchannel_read(controlvm_channel,
1442                               offsetof(struct spar_controlvm_channel_protocol,
1443                                        request_payload_bytes),
1444                               &payload_bytes, sizeof(payload_bytes)) < 0) {
1445                 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1446                                  POSTCODE_SEVERITY_ERR);
1447                 return;
1448         }
1449         initialize_controlvm_payload_info(phys_addr,
1450                                           payload_offset, payload_bytes,
1451                                           &controlvm_payload_info);
1452 }
1453
1454 /*  Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1455  *  Returns CONTROLVM_RESP_xxx code.
1456  */
1457 static int
1458 visorchipset_chipset_ready(void)
1459 {
1460         kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
1461         return CONTROLVM_RESP_SUCCESS;
1462 }
1463
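/*  Send a KOBJ_CHANGE uevent carrying SPARSP_SELFTEST=1 for
 *  DEVPATH=/sys/devices/platform/visorchipset.
 *  Returns CONTROLVM_RESP_xxx code.
 */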
1464 static int
1465 visorchipset_chipset_selftest(void)
1466 {
1467         char env_selftest[20];
1468         char *envp[] = { env_selftest, NULL };
1469
1470         sprintf(env_selftest, "SPARSP_SELFTEST=1");
1471         kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1472                            envp);
1473         return CONTROLVM_RESP_SUCCESS;
1474 }
1475
1476 /*  Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1477  *  Returns CONTROLVM_RESP_xxx code.
1478  */
1479 static int
1480 visorchipset_chipset_notready(void)
1481 {
1482         kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1483         return CONTROLVM_RESP_SUCCESS;
1484 }
1485
1486 static void
1487 chipset_ready(struct controlvm_message_header *msg_hdr)
1488 {
1489         int rc = visorchipset_chipset_ready();
1490
1491         if (rc != CONTROLVM_RESP_SUCCESS)
1492                 rc = -rc;
1493         if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
1494                 controlvm_respond(msg_hdr, rc);
1495         if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
1496                 /* Send CHIPSET_READY response when all modules have been loaded
1497                  * and disks mounted for the partition
1498                  */
1499                 g_chipset_msg_hdr = *msg_hdr;
1500         }
1501 }
1502
1503 static void
1504 chipset_selftest(struct controlvm_message_header *msg_hdr)
1505 {
1506         int rc = visorchipset_chipset_selftest();
1507
1508         if (rc != CONTROLVM_RESP_SUCCESS)
1509                 rc = -rc;
1510         if (msg_hdr->flags.response_expected)
1511                 controlvm_respond(msg_hdr, rc);
1512 }
1513
1514 static void
1515 chipset_notready(struct controlvm_message_header *msg_hdr)
1516 {
1517         int rc = visorchipset_chipset_notready();
1518
1519         if (rc != CONTROLVM_RESP_SUCCESS)
1520                 rc = -rc;
1521         if (msg_hdr->flags.response_expected)
1522                 controlvm_respond(msg_hdr, rc);
1523 }
1524
1525 /* Grab the next message from the CONTROLVM_QUEUE_EVENT queue in the
1526  * controlvm channel; returns true only for real (non-test) messages.
1527  */
1528 static bool
1529 read_controlvm_event(struct controlvm_message *msg)
1530 {
1531         if (visorchannel_signalremove(controlvm_channel,
1532                                       CONTROLVM_QUEUE_EVENT, msg)) {
1533                 /* got a message */
1534                 if (msg->hdr.flags.test_message == 1)
1535                         return false;
1536                 return true;
1537         }
1538         return false;
1539 }
1540
1541 /*
1542  * The general parahotplug flow works as follows.  The visorchipset
1543  * driver receives a DEVICE_CHANGESTATE message from Command
1544  * specifying a physical device to enable or disable.  The CONTROLVM
1545  * message handler calls parahotplug_process_message, which then adds
1546  * the message to a global list and kicks off a udev event which
1547  * causes a user-level script to enable or disable the specified
1548  * device.  The udev script then writes the request id back through
1549  * the parahotplug/deviceenabled or parahotplug/devicedisabled sysfs
1550  * attribute, which ends up in parahotplug_request_complete, where the
1551  * matching CONTROLVM message is removed from the list and responded to.
1552  */
1553
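/*
 * For illustration only -- the helper script and its install path live
 * outside this driver, so treat the path below as an assumption.  After
 * the script has disabled the device, it acknowledges the request with
 * something like
 *
 *   echo "$SPAR_PARAHOTPLUG_ID" > \
 *       /sys/devices/platform/visorchipset/parahotplug/devicedisabled
 *
 * using the id that parahotplug_request_kickoff placed in the uevent
 * environment; deviceenabled works the same way for enables.
 */
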
1554 #define PARAHOTPLUG_TIMEOUT_MS 2000
1555
1556 /*
1557  * Generate unique int to match an outstanding CONTROLVM message with a
1558  * udev script /proc response
1559  */
1560 static int
1561 parahotplug_next_id(void)
1562 {
1563         static atomic_t id = ATOMIC_INIT(0);
1564
1565         return atomic_inc_return(&id);
1566 }
1567
1568 /*
1569  * Returns the time (in jiffies) when a CONTROLVM message on the list
1570  * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1571  */
1572 static unsigned long
1573 parahotplug_next_expiration(void)
1574 {
1575         return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1576 }
1577
1578 /*
1579  * Create a parahotplug_request, which is basically a wrapper for a
1580  * CONTROLVM_MESSAGE that we can stick on a list
1581  */
1582 static struct parahotplug_request *
1583 parahotplug_request_create(struct controlvm_message *msg)
1584 {
1585         struct parahotplug_request *req;
1586
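        /* __GFP_NORETRY: fail fast under memory pressure rather than
         * stall controlvm processing; the caller tolerates a NULL return.
         */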
1587         req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
1588         if (!req)
1589                 return NULL;
1590
1591         req->id = parahotplug_next_id();
1592         req->expiration = parahotplug_next_expiration();
1593         req->msg = *msg;
1594
1595         return req;
1596 }
1597
1598 /*
1599  * Free a parahotplug_request.
1600  */
1601 static void
1602 parahotplug_request_destroy(struct parahotplug_request *req)
1603 {
1604         kfree(req);
1605 }
1606
1607 /*
1608  * Cause uevent to run the user level script to do the disable/enable
1609  * specified in (the CONTROLVM message in) the specified
1610  * parahotplug_request
1611  */
1612 static void
1613 parahotplug_request_kickoff(struct parahotplug_request *req)
1614 {
1615         struct controlvm_message_packet *cmd = &req->msg.cmd;
1616         char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1617             env_func[40];
1618         char *envp[] = {
1619                 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1620         };
1621
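        /* dev_no packs a PCI-style device/function pair: bits 3 and up
         * hold the device number, the low three bits the function.
         */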
1622         sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1623         sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1624         sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1625                 cmd->device_change_state.state.active);
1626         sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1627                 cmd->device_change_state.bus_no);
1628         sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1629                 cmd->device_change_state.dev_no >> 3);
1630         sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1631                 cmd->device_change_state.dev_no & 0x7);
1632
1633         kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1634                            envp);
1635 }
1636
1637 /*
1638  * Remove any request from the list that's been on there too long and
1639  * respond with an error.
1640  */
1641 static void
1642 parahotplug_process_list(void)
1643 {
1644         struct list_head *pos;
1645         struct list_head *tmp;
1646
1647         spin_lock(&parahotplug_request_list_lock);
1648
1649         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1650                 struct parahotplug_request *req =
1651                     list_entry(pos, struct parahotplug_request, list);
1652
1653                 if (!time_after_eq(jiffies, req->expiration))
1654                         continue;
1655
1656                 list_del(pos);
1657                 if (req->msg.hdr.flags.response_expected)
1658                         controlvm_respond_physdev_changestate(
1659                                 &req->msg.hdr,
1660                                 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1661                                 req->msg.cmd.device_change_state.state);
1662                 parahotplug_request_destroy(req);
1663         }
1664
1665         spin_unlock(&parahotplug_request_list_lock);
1666 }
1667
1668 /*
1669  * Called from the /proc handler, which means the user script has
1670  * finished the enable/disable.  Find the matching identifier, and
1671  * respond to the CONTROLVM message with success.
1672  */
1673 static int
1674 parahotplug_request_complete(int id, u16 active)
1675 {
1676         struct list_head *pos;
1677         struct list_head *tmp;
1678
1679         spin_lock(&parahotplug_request_list_lock);
1680
1681         /* Look for a request matching "id". */
1682         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1683                 struct parahotplug_request *req =
1684                     list_entry(pos, struct parahotplug_request, list);
1685                 if (req->id == id) {
1686                         /* Found a match.  Remove it from the list and
1687                          * respond.
1688                          */
1689                         list_del(pos);
1690                         spin_unlock(&parahotplug_request_list_lock);
1691                         req->msg.cmd.device_change_state.state.active = active;
1692                         if (req->msg.hdr.flags.response_expected)
1693                                 controlvm_respond_physdev_changestate(
1694                                         &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1695                                         req->msg.cmd.device_change_state.state);
1696                         parahotplug_request_destroy(req);
1697                         return 0;
1698                 }
1699         }
1700
1701         spin_unlock(&parahotplug_request_list_lock);
1702         return -1;
1703 }
1704
1705 /*
1706  * Enables or disables a PCI device by kicking off a udev script
1707  */
1708 static void
1709 parahotplug_process_message(struct controlvm_message *inmsg)
1710 {
1711         struct parahotplug_request *req;
1712
1713         req = parahotplug_request_create(inmsg);
1714
1715         if (!req)
1716                 return;
1717
1718         if (inmsg->cmd.device_change_state.state.active) {
1719                 /* For enable messages, just respond with success
1720                  * right away.  This is a bit of a hack, but there are
1721                  * issues with the early enable messages we get (with
1722                  * either the udev script not detecting that the device
1723                  * is up, or not getting called at all).  Fortunately
1724                  * the messages that get lost don't matter anyway, as
1725                  * devices are automatically enabled at
1726                  * initialization.
1727                  */
1728                 parahotplug_request_kickoff(req);
1729                 controlvm_respond_physdev_changestate(&inmsg->hdr,
1730                         CONTROLVM_RESP_SUCCESS,
1731                         inmsg->cmd.device_change_state.state);
1732                 parahotplug_request_destroy(req);
1733         } else {
1734                 /* For disable messages, add the request to the
1735                  * request list before kicking off the udev script.  It
1736                  * won't get responded to until the script has
1737                  * indicated it's done.
1738                  */
1739                 spin_lock(&parahotplug_request_list_lock);
1740                 list_add_tail(&req->list, &parahotplug_request_list);
1741                 spin_unlock(&parahotplug_request_list_lock);
1742
1743                 parahotplug_request_kickoff(req);
1744         }
1745 }
1746
1747 /* Process a controlvm message.
1748  * Return result:
1749  *    false - this function will return false only in the case where the
1750  *            controlvm message was NOT processed, but processing must be
1751  *            retried before reading the next controlvm message; a
1752  *            scenario where this can occur is when we need to throttle
1753  *            the allocation of memory in which to copy out controlvm
1754  *            payload data
1755  *    true  - processing of the controlvm message completed,
1756  *            either successfully or with an error.
1757  */
1758 static bool
1759 handle_command(struct controlvm_message inmsg, u64 channel_addr)
1760 {
1761         struct controlvm_message_packet *cmd = &inmsg.cmd;
1762         u64 parm_addr;
1763         u32 parm_bytes;
1764         struct parser_context *parser_ctx = NULL;
1765         bool local_addr;
1766         struct controlvm_message ackmsg;
1767
1768         /* create parsing context if necessary */
1769         local_addr = (inmsg.hdr.flags.test_message == 1);
1770         if (channel_addr == 0)
1771                 return true;
1772         parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1773         parm_bytes = inmsg.hdr.payload_bytes;
1774
1775         /* Parameter and channel addresses within test messages actually lie
1776          * within our OS-controlled memory.  We need to know that, because it
1777          * makes a difference in how we compute the virtual address.
1778          */
1779         if (parm_addr && parm_bytes) {
1780                 bool retry = false;
1781
1782                 parser_ctx =
1783                     parser_init_byte_stream(parm_addr, parm_bytes,
1784                                             local_addr, &retry);
1785                 if (!parser_ctx && retry)
1786                         return false;
1787         }
1788
1789         if (!local_addr) {
1790                 controlvm_init_response(&ackmsg, &inmsg.hdr,
1791                                         CONTROLVM_RESP_SUCCESS);
1792                 if (controlvm_channel)
1793                         visorchannel_signalinsert(controlvm_channel,
1794                                                   CONTROLVM_QUEUE_ACK,
1795                                                   &ackmsg);
1796         }
1797         switch (inmsg.hdr.id) {
1798         case CONTROLVM_CHIPSET_INIT:
1799                 chipset_init(&inmsg);
1800                 break;
1801         case CONTROLVM_BUS_CREATE:
1802                 bus_create(&inmsg);
1803                 break;
1804         case CONTROLVM_BUS_DESTROY:
1805                 bus_destroy(&inmsg);
1806                 break;
1807         case CONTROLVM_BUS_CONFIGURE:
1808                 bus_configure(&inmsg, parser_ctx);
1809                 break;
1810         case CONTROLVM_DEVICE_CREATE:
1811                 my_device_create(&inmsg);
1812                 break;
1813         case CONTROLVM_DEVICE_CHANGESTATE:
1814                 if (cmd->device_change_state.flags.phys_device) {
1815                         parahotplug_process_message(&inmsg);
1816                 } else {
1817                         /* save the hdr and cmd structures for later use
1818                          * when sending back the response to Command
1819                          */
1820                         my_device_changestate(&inmsg);
1821                         g_devicechangestate_packet = inmsg.cmd;
1822                 }
1823                 break;
1824         case CONTROLVM_DEVICE_DESTROY:
1825                 my_device_destroy(&inmsg);
1826                 break;
1827         case CONTROLVM_DEVICE_CONFIGURE:
1828                 /* no op for now, just send a response indicating success */
1829                 if (inmsg.hdr.flags.response_expected)
1830                         controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1831                 break;
1832         case CONTROLVM_CHIPSET_READY:
1833                 chipset_ready(&inmsg.hdr);
1834                 break;
1835         case CONTROLVM_CHIPSET_SELFTEST:
1836                 chipset_selftest(&inmsg.hdr);
1837                 break;
1838         case CONTROLVM_CHIPSET_STOP:
1839                 chipset_notready(&inmsg.hdr);
1840                 break;
1841         default:
1842                 if (inmsg.hdr.flags.response_expected)
1843                         controlvm_respond(&inmsg.hdr,
1844                                 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1845                 break;
1846         }
1847
1848         if (parser_ctx) {
1849                 parser_done(parser_ctx);
1850                 parser_ctx = NULL;
1851         }
1852         return true;
1853 }
1854
1855 static inline unsigned int
1856 issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
1857 {
1858         struct vmcall_io_controlvm_addr_params params;
1859         int result = VMCALL_SUCCESS;
1860         u64 physaddr;
1861
1862         physaddr = virt_to_phys(&params);
1863         ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
1864         if (VMCALL_SUCCESSFUL(result)) {
1865                 *control_addr = params.address;
1866                 *control_bytes = params.channel_bytes;
1867         }
1868         return result;
1869 }
1870
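/* Ask the hypervisor, via vmcall, for the guest physical address of the
 * controlvm channel; returns 0 if the vmcall fails.
 */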
1871 static u64 controlvm_get_channel_address(void)
1872 {
1873         u64 addr = 0;
1874         u32 size = 0;
1875
1876         if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
1877                 return 0;
1878
1879         return addr;
1880 }
1881
1882 static void
1883 controlvm_periodic_work(struct work_struct *work)
1884 {
1885         struct controlvm_message inmsg;
1886         bool got_command = false;
1887         bool handle_command_failed = false;
1888         static u64 poll_count;
1889
1890         /* make sure visorbus server is registered for controlvm callbacks */
1891         if (visorchipset_visorbusregwait && !visorbusregistered)
1892                 goto cleanup;
1893
1894         poll_count++;
1895         if (poll_count < 250)
1896                 goto cleanup;
1899
1900         /* Check events to determine if response to CHIPSET_READY
1901          * should be sent
1902          */
1903         if (visorchipset_holdchipsetready &&
1904             (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
1905                 if (check_chipset_events() == 1) {
1906                         controlvm_respond(&g_chipset_msg_hdr, CONTROLVM_RESP_SUCCESS);
1907                         clear_chipset_events();
1908                         memset(&g_chipset_msg_hdr, 0,
1909                                sizeof(struct controlvm_message_header));
1910                 }
1911         }
1912
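        /* drain and discard any stale messages on the response queue */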
1913         while (visorchannel_signalremove(controlvm_channel,
1914                                          CONTROLVM_QUEUE_RESPONSE,
1915                                          &inmsg))
1916                 ;
1917         if (!got_command) {
1918                 if (controlvm_pending_msg_valid) {
1919                         /* we throttled processing of a prior
1920                          * msg, so try to process it again
1921                          * rather than reading a new one
1922                          */
1923                         inmsg = controlvm_pending_msg;
1924                         controlvm_pending_msg_valid = false;
1925                         got_command = true;
1926                 } else {
1927                         got_command = read_controlvm_event(&inmsg);
1928                 }
1929         }
1930
1931         handle_command_failed = false;
1932         while (got_command && (!handle_command_failed)) {
1933                 most_recent_message_jiffies = jiffies;
1934                 if (handle_command(inmsg,
1935                                    visorchannel_get_physaddr
1936                                    (controlvm_channel)))
1937                         got_command = read_controlvm_event(&inmsg);
1938                 else {
1939                         /* This is a scenario where throttling
1940                          * is required, but it is probably not an
1941                          * error; stash the current controlvm
1942                          * message so we can attempt to reprocess
1943                          * it on the next loop iteration.
1944                          */
1945                         handle_command_failed = true;
1946                         controlvm_pending_msg = inmsg;
1947                         controlvm_pending_msg_valid = true;
1948                 }
1949         }
1950
1951         /* parahotplug_worker */
1952         parahotplug_process_list();
1953
1954 cleanup:
1955
1956         if (time_after(jiffies,
1957                        most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
1958                 /* it's been longer than MIN_IDLE_SECONDS since we
1959                  * processed our last controlvm message; slow down the
1960                  * polling
1961                  */
1962                 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1963                         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1964         } else {
1965                 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1966                         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1967         }
1968
1969         schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
1970 }
1971
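/* When booting as a kdump (crash) kernel, re-create the storage bus and
 * device from the CONTROLVM messages that were saved in the controlvm
 * channel at crash time, so that the dump disk can be brought up without
 * the normal message flow.
 */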
1972 static void
1973 setup_crash_devices_work_queue(struct work_struct *work)
1974 {
1975         struct controlvm_message local_crash_bus_msg;
1976         struct controlvm_message local_crash_dev_msg;
1977         struct controlvm_message msg;
1978         u32 local_crash_msg_offset;
1979         u16 local_crash_msg_count;
1980
1981         /* make sure visorbus is registered for controlvm callbacks */
1982         if (visorchipset_visorbusregwait && !visorbusregistered)
1983                 goto cleanup;
1984
1985         POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1986
1987         /* send init chipset msg; zero it first so that uninitialized
         * stack data is not mistaken for header flags or feature bits
         */
        memset(&msg, 0, sizeof(msg));
1988         msg.hdr.id = CONTROLVM_CHIPSET_INIT;
1989         msg.cmd.init_chipset.bus_count = 23;
1990         msg.cmd.init_chipset.switch_count = 0;
1991
1992         chipset_init(&msg);
1993
1994         /* get saved message count */
1995         if (visorchannel_read(controlvm_channel,
1996                               offsetof(struct spar_controlvm_channel_protocol,
1997                                        saved_crash_message_count),
1998                               &local_crash_msg_count, sizeof(u16)) < 0) {
1999                 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2000                                  POSTCODE_SEVERITY_ERR);
2001                 return;
2002         }
2003
2004         if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
2005                 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
2006                                  local_crash_msg_count,
2007                                  POSTCODE_SEVERITY_ERR);
2008                 return;
2009         }
2010
2011         /* get saved crash message offset */
2012         if (visorchannel_read(controlvm_channel,
2013                               offsetof(struct spar_controlvm_channel_protocol,
2014                                        saved_crash_message_offset),
2015                               &local_crash_msg_offset, sizeof(u32)) < 0) {
2016                 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2017                                  POSTCODE_SEVERITY_ERR);
2018                 return;
2019         }
2020
2021         /* read create device message for storage bus offset */
2022         if (visorchannel_read(controlvm_channel,
2023                               local_crash_msg_offset,
2024                               &local_crash_bus_msg,
2025                               sizeof(struct controlvm_message)) < 0) {
2026                 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
2027                                  POSTCODE_SEVERITY_ERR);
2028                 return;
2029         }
2030
2031         /* read create device message for storage device */
2032         if (visorchannel_read(controlvm_channel,
2033                               local_crash_msg_offset +
2034                               sizeof(struct controlvm_message),
2035                               &local_crash_dev_msg,
2036                               sizeof(struct controlvm_message)) < 0) {
2037                 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
2038                                  POSTCODE_SEVERITY_ERR);
2039                 return;
2040         }
2041
2042         /* reuse IOVM create bus message */
2043         if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
2044                 bus_create(&local_crash_bus_msg);
2045         } else {
2046                 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
2047                                  POSTCODE_SEVERITY_ERR);
2048                 return;
2049         }
2050
2051         /* reuse create device message for storage device */
2052         if (local_crash_dev_msg.cmd.create_device.channel_addr) {
2053                 my_device_create(&local_crash_dev_msg);
2054         } else {
2055                 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
2056                                  POSTCODE_SEVERITY_ERR);
2057                 return;
2058         }
2059         POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
2060         return;
2061
2062 cleanup:
2063
2064         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2065
2066         schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
2067 }
2068
2069 static void
2070 bus_create_response(struct visor_device *bus_info, int response)
2071 {
2072         if (response >= 0)
2073                 bus_info->state.created = 1;
2074
2075         bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
2076                       response);
2077
2078         kfree(bus_info->pending_msg_hdr);
2079         bus_info->pending_msg_hdr = NULL;
2080 }
2081
2082 static void
2083 bus_destroy_response(struct visor_device *bus_info, int response)
2084 {
2085         bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
2086                       response);
2087
2088         kfree(bus_info->pending_msg_hdr);
2089         bus_info->pending_msg_hdr = NULL;
2090 }
2091
2092 static void
2093 device_create_response(struct visor_device *dev_info, int response)
2094 {
2095         if (response >= 0)
2096                 dev_info->state.created = 1;
2097
2098         device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
2099                          response);
2100
2101         kfree(dev_info->pending_msg_hdr);
2102         dev_info->pending_msg_hdr = NULL;
2103 }
2104
2105 static void
2106 device_destroy_response(struct visor_device *dev_info, int response)
2107 {
2108         device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
2109                          response);
2110
2111         kfree(dev_info->pending_msg_hdr);
2112         dev_info->pending_msg_hdr = NULL;
2113 }
2114
2115 static void
2116 visorchipset_device_pause_response(struct visor_device *dev_info,
2117                                    int response)
2118 {
2119         device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2120                                      dev_info, response,
2121                                      segment_state_standby);
2122
2123         kfree(dev_info->pending_msg_hdr);
2124         dev_info->pending_msg_hdr = NULL;
2125 }
2126
2127 static void
2128 device_resume_response(struct visor_device *dev_info, int response)
2129 {
2130         device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2131                                      dev_info, response,
2132                                      segment_state_running);
2133
2134         kfree(dev_info->pending_msg_hdr);
2135         dev_info->pending_msg_hdr = NULL;
2136 }
2137
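/* Written by guest user space to report boot progress ("MODULES_LOADED"
 * or "CALLHOMEDISK_MOUNTED").  Once check_chipset_events() sees the
 * expected events, the held CHIPSET_READY response is sent from
 * controlvm_periodic_work.  Illustration only -- the sysfs path is an
 * assumption:
 *
 *   echo MODULES_LOADED > /sys/devices/platform/visorchipset/guest/chipsetready
 */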
2138 static ssize_t chipsetready_store(struct device *dev,
2139                                   struct device_attribute *attr,
2140                                   const char *buf, size_t count)
2141 {
2142         char msgtype[64];
2143
2144         if (sscanf(buf, "%63s", msgtype) != 1)
2145                 return -EINVAL;
2146
2147         if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
2148                 chipset_events[0] = 1;
2149                 return count;
2150         } else if (!strcmp(msgtype, "MODULES_LOADED")) {
2151                 chipset_events[1] = 1;
2152                 return count;
2153         }
2154         return -EINVAL;
2155 }
2156
2157 /* The parahotplug/devicedisabled interface gets called by our support script
2158  * when an SR-IOV device has been shut down. The ID is passed to the script
2159  * and then passed back when the device has been removed.
2160  */
2161 static ssize_t devicedisabled_store(struct device *dev,
2162                                     struct device_attribute *attr,
2163                                     const char *buf, size_t count)
2164 {
2165         unsigned int id;
2166
2167         if (kstrtouint(buf, 10, &id))
2168                 return -EINVAL;
2169
2170         parahotplug_request_complete(id, 0);
2171         return count;
2172 }
2173
2174 /* The parahotplug/deviceenabled interface gets called by our support script
2175  * when an SR-IOV device has been recovered. The ID is passed to the script
2176  * and then passed back when the device has been brought back up.
2177  */
2178 static ssize_t deviceenabled_store(struct device *dev,
2179                                    struct device_attribute *attr,
2180                                    const char *buf, size_t count)
2181 {
2182         unsigned int id;
2183
2184         if (kstrtouint(buf, 10, &id))
2185                 return -EINVAL;
2186
2187         parahotplug_request_complete(id, 1);
2188         return count;
2189 }
2190
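/* Map the control channel into user space.  A minimal sketch, assuming
 * the device node is named /dev/visorchipset:
 *
 *   int fd = open("/dev/visorchipset", O_RDWR);
 *   void *chan = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                     fd, VISORCHIPSET_MMAP_CONTROLCHANOFFSET);
 */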
2191 static int
2192 visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
2193 {
2194         unsigned long physaddr = 0;
2195         unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
2196         u64 addr = 0;
2197
2199         if (offset & (PAGE_SIZE - 1))
2200                 return -ENXIO;  /* need aligned offsets */
2201
2202         switch (offset) {
2203         case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
2204                 vma->vm_flags |= VM_IO;
2205                 if (!*file_controlvm_channel)
2206                         return -ENXIO;
2207
2208                 visorchannel_read(*file_controlvm_channel,
2209                         offsetof(struct spar_controlvm_channel_protocol,
2210                                  gp_control_channel),
2211                         &addr, sizeof(addr));
2212                 if (!addr)
2213                         return -ENXIO;
2214
2215                 physaddr = (unsigned long)addr;
2216                 if (remap_pfn_range(vma, vma->vm_start,
2217                                     physaddr >> PAGE_SHIFT,
2218                                     vma->vm_end - vma->vm_start,
2219                                     /*pgprot_noncached */
2220                                     (vma->vm_page_prot))) {
2221                         return -EAGAIN;
2222                 }
2223                 break;
2224         default:
2225                 return -ENXIO;
2226         }
2227         return 0;
2228 }
2229
2230 static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
2231 {
2232         u64 result = VMCALL_SUCCESS;
2233         u64 physaddr = 0;
2234
2235         ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
2236                         result);
2237         return result;
2238 }
2239
2240 static inline int issue_vmcall_update_physical_time(u64 adjustment)
2241 {
2242         int result = VMCALL_SUCCESS;
2243
2244         ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
2245         return result;
2246 }
2247
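/* A minimal user-space sketch of the time-offset ioctls, assuming the
 * device node is named /dev/visorchipset:
 *
 *   int64_t vrtc_offset;
 *   int fd = open("/dev/visorchipset", O_RDWR);
 *
 *   if (ioctl(fd, VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET,
 *             &vrtc_offset) == 0)
 *           printf("vrtc offset: %lld\n", (long long)vrtc_offset);
 */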
2248 static long visorchipset_ioctl(struct file *file, unsigned int cmd,
2249                                unsigned long arg)
2250 {
2251         u64 adjustment;
2252         s64 vrtc_offset;
2253
2254         switch (cmd) {
2255         case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
2256                 /* get the physical rtc offset */
2257                 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
2258                 if (copy_to_user((void __user *)arg, &vrtc_offset,
2259                                  sizeof(vrtc_offset))) {
2260                         return -EFAULT;
2261                 }
2262                 return 0;
2263         case VMCALL_UPDATE_PHYSICAL_TIME:
2264                 if (copy_from_user(&adjustment, (void __user *)arg,
2265                                    sizeof(adjustment))) {
2266                         return -EFAULT;
2267                 }
2268                 return issue_vmcall_update_physical_time(adjustment);
2269         default:
2270                 return -ENOTTY;
2271         }
2272 }
2273
2274 static const struct file_operations visorchipset_fops = {
2275         .owner = THIS_MODULE,
2276         .open = visorchipset_open,
2277         .read = NULL,
2278         .write = NULL,
2279         .unlocked_ioctl = visorchipset_ioctl,
2280         .release = visorchipset_release,
2281         .mmap = visorchipset_mmap,
2282 };
2283
2284 static int
2285 visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
2286 {
2287         int rc = 0;
2288
2289         file_controlvm_channel = controlvm_channel;
2290         cdev_init(&file_cdev, &visorchipset_fops);
2291         file_cdev.owner = THIS_MODULE;
2292         if (MAJOR(major_dev) == 0) {
2293                 rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
2294                 /* dynamic major device number registration required */
2295                 if (rc < 0)
2296                         return rc;
2297         } else {
2298                 /* static major device number registration required */
2299                 rc = register_chrdev_region(major_dev, 1, "visorchipset");
2300                 if (rc < 0)
2301                         return rc;
2302         }
2303         rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
2304         if (rc < 0) {
2305                 unregister_chrdev_region(major_dev, 1);
2306                 return rc;
2307         }
2308         return 0;
2309 }
2310
2311 static int
2312 visorchipset_init(struct acpi_device *acpi_device)
2313 {
2314         int rc = 0;
2315         u64 addr;
2316         uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
2317
2318         addr = controlvm_get_channel_address();
2319         if (!addr)
2320                 return -ENODEV;
2321
2322         memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
2323         memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
2324
2325         controlvm_channel = visorchannel_create_with_lock(addr, 0,
2326                                                           GFP_KERNEL, uuid);
2327         if (!controlvm_channel)
2328                 return -ENODEV;
2329         if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2330                     visorchannel_get_header(controlvm_channel))) {
2331                 initialize_controlvm_payload();
2332         } else {
2333                 visorchannel_destroy(controlvm_channel);
2334                 controlvm_channel = NULL;
2335                 return -ENODEV;
2336         }
2337
2338         major_dev = MKDEV(visorchipset_major, 0);
2339         rc = visorchipset_file_init(major_dev, &controlvm_channel);
2340         if (rc < 0) {
2341                 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2342                 goto cleanup;
2343         }
2344
2345         memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2346
2347         /* if booting in a crash kernel */
2348         if (is_kdump_kernel())
2349                 INIT_DELAYED_WORK(&periodic_controlvm_work,
2350                                   setup_crash_devices_work_queue);
2351         else
2352                 INIT_DELAYED_WORK(&periodic_controlvm_work,
2353                                   controlvm_periodic_work);
2354
2355         most_recent_message_jiffies = jiffies;
2356         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2357         schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
2358
2359         visorchipset_platform_device.dev.devt = major_dev;
2360         if (platform_device_register(&visorchipset_platform_device) < 0) {
2361                 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2362                 rc = -ENODEV;
2363                 goto cleanup;
2364         }
2365         POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2366
2367         rc = visorbus_init();
2368 cleanup:
2369         if (rc) {
2370                 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2371                                  POSTCODE_SEVERITY_ERR);
2372         }
2373         return rc;
2374 }
2375
2376 static void
2377 visorchipset_file_cleanup(dev_t major_dev)
2378 {
2379         if (file_cdev.ops)
2380                 cdev_del(&file_cdev);
2381         file_cdev.ops = NULL;
2382         unregister_chrdev_region(major_dev, 1);
2383 }
2384
2385 static int
2386 visorchipset_exit(struct acpi_device *acpi_device)
2387 {
2388         POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2389
2390         visorbus_exit();
2391
2392         cancel_delayed_work_sync(&periodic_controlvm_work);
2393         destroy_controlvm_payload_info(&controlvm_payload_info);
2394
2395         memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2396
2397         visorchannel_destroy(controlvm_channel);
2398
2399         visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
2400         platform_device_unregister(&visorchipset_platform_device);
2401         POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2402
2403         return 0;
2404 }
2405
2406 static const struct acpi_device_id unisys_device_ids[] = {
2407         {"PNP0A07", 0},
2408         {"", 0},
2409 };
2410
2411 static struct acpi_driver unisys_acpi_driver = {
2412         .name = "unisys_acpi",
2413         .class = "unisys_acpi_class",
2414         .owner = THIS_MODULE,
2415         .ids = unisys_device_ids,
2416         .ops = {
2417                 .add = visorchipset_init,
2418                 .remove = visorchipset_exit,
2419                 },
2420 };
2421
2422 MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
2423
2424 static uint32_t __init visorutil_spar_detect(void)
2425 {
2426         unsigned int eax, ebx, ecx, edx;
2427
2428         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2429                 /* check the ID */
2430                 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
2431                 return  (ebx == UNISYS_SPAR_ID_EBX) &&
2432                         (ecx == UNISYS_SPAR_ID_ECX) &&
2433                         (edx == UNISYS_SPAR_ID_EDX);
2434         } else {
2435                 return 0;
2436         }
2437 }
2438
2439 static int init_unisys(void)
2440 {
2441         int result;
2442
2443         if (!visorutil_spar_detect())
2444                 return -ENODEV;
2445
2446         result = acpi_bus_register_driver(&unisys_acpi_driver);
2447         if (result)
2448                 return -ENODEV;
2449
2450         pr_info("Unisys Visorchipset Driver Loaded.\n");
2451         return 0;
2452 }
2453
2454 static void exit_unisys(void)
2455 {
2456         acpi_bus_unregister_driver(&unisys_acpi_driver);
2457 }
2458
2459 module_param_named(major, visorchipset_major, int, S_IRUGO);
2460 MODULE_PARM_DESC(major,
2461                  "major device number to use for the device node");
2462 module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
2463 MODULE_PARM_DESC(visorbusregwait,
2464                  "1 to have the module wait for the visor bus to register");
2465 module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2466                    int, S_IRUGO);
2467 MODULE_PARM_DESC(holdchipsetready,
2468                  "1 to hold response to CHIPSET_READY");
2469
2470 module_init(init_unisys);
2471 module_exit(exit_unisys);
2472
2473 MODULE_AUTHOR("Unisys");
2474 MODULE_LICENSE("GPL");
2475 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2476                    VERSION);
2477 MODULE_VERSION(VERSION);