staging: unisys: include: Add comment next to mutex.
[linux-2.6-block.git] / drivers / staging / unisys / visorbus / visorchipset.c
1 /*
2  * Copyright (C) 2010 - 2015 UNISYS CORPORATION
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12  * NON INFRINGEMENT.  See the GNU General Public License for more
13  * details.
14  */
15
16 #include <linux/acpi.h>
17 #include <linux/crash_dump.h>
18
19 #include "visorbus.h"
20 #include "visorbus_private.h"
21
22 /* {72120008-4AAB-11DC-8530-444553544200} */
23 #define VISOR_SIOVM_GUID \
24         GUID_INIT(0x72120008, 0x4AAB, 0x11DC, \
25                   0x85, 0x30, 0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
26
27 static const guid_t visor_vhba_channel_guid = VISOR_VHBA_CHANNEL_GUID;
28 static const guid_t visor_siovm_guid = VISOR_SIOVM_GUID;
29 static const guid_t visor_controlvm_channel_guid = VISOR_CONTROLVM_CHANNEL_GUID;
30
31 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
32 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
33
34 #define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
35
36 #define UNISYS_VISOR_LEAF_ID 0x40000000
37
38 /* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
39 #define UNISYS_VISOR_ID_EBX 0x73696e55
40 #define UNISYS_VISOR_ID_ECX 0x70537379
41 #define UNISYS_VISOR_ID_EDX 0x34367261
42
43 /*
44  * When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
45  * we switch to slow polling mode. As soon as we get a controlvm
46  * message, we switch back to fast polling mode.
47  */
48 #define MIN_IDLE_SECONDS 10
49
50 struct parser_context {
51         unsigned long allocbytes;
52         unsigned long param_bytes;
53         u8 *curr;
54         unsigned long bytes_remaining;
55         bool byte_stream;
56         char data[0];
57 };
58
59 /* VMCALL_CONTROLVM_ADDR: Used by all guests, not just IO. */
60 #define VMCALL_CONTROLVM_ADDR 0x0501
61
/* Result codes reported by the VMCALL interfaces (e.g. VMCALL_CONTROLVM_ADDR). */
enum vmcall_result {
        VMCALL_RESULT_SUCCESS = 0,
        VMCALL_RESULT_INVALID_PARAM = 1,
        VMCALL_RESULT_DATA_UNAVAILABLE = 2,
        VMCALL_RESULT_FAILURE_UNAVAILABLE = 3,
        VMCALL_RESULT_DEVICE_ERROR = 4,
        VMCALL_RESULT_DEVICE_NOT_READY = 5
};
70
/*
 * struct vmcall_io_controlvm_addr_params - Structure for IO VMCALLS. Has
 *                                          parameters to VMCALL_CONTROLVM_ADDR
 *                                          interface.
 * @address:       The Guest-relative physical address of the ControlVm channel.
 *                 This VMCall fills this in with the appropriate address.
 *                 Contents provided by this VMCALL (OUT).
 * @channel_bytes: The size of the ControlVm channel in bytes. This VMCall
 *                 fills this in with the appropriate size. Contents provided
 *                 by this VMCALL (OUT).
 * @unused:        Unused Bytes in the 64-Bit Aligned Struct.
 */
struct vmcall_io_controlvm_addr_params {
        u64 address;
        u32 channel_bytes;
        u8 unused[4];
} __packed;
88
/*
 * struct visorchipset_device - global driver state (singleton, see chipset_dev)
 * @acpi_device:             ACPI device this driver was probed against
 * @poll_jiffies:            current delay for the periodic controlvm work
 * @most_recent_message_jiffies: when we got our last controlvm message
 * @periodic_controlvm_work: delayed work item that services the channel
 * @controlvm_channel:       channel carrying controlvm messages
 * @controlvm_payload_bytes_buffered: payload bytes currently held in live
 *                           parser contexts (decremented in parser_done())
 * @controlvm_pending_msg:   stashed message (see block comment below)
 * @controlvm_pending_msg_valid: true while @controlvm_pending_msg is in use
 * @controlvm_params:        OUT parameters of VMCALL_CONTROLVM_ADDR
 */
struct visorchipset_device {
        struct acpi_device *acpi_device;
        unsigned long poll_jiffies;
        /* when we got our last controlvm message */
        unsigned long most_recent_message_jiffies;
        struct delayed_work periodic_controlvm_work;
        struct visorchannel *controlvm_channel;
        unsigned long controlvm_payload_bytes_buffered;
        /*
         * The following variables are used to handle the scenario where we are
         * unable to offload the payload from a controlvm message due to memory
         * requirements. In this scenario, we simply stash the controlvm
         * message, then attempt to process it again the next time
         * controlvm_periodic_work() runs.
         */
        struct controlvm_message controlvm_pending_msg;
        bool controlvm_pending_msg_valid;
        struct vmcall_io_controlvm_addr_params controlvm_params;
};
108
109 static struct visorchipset_device *chipset_dev;
110
/*
 * struct parahotplug_request - a queued parahotplug controlvm message
 * @list:       links this request into a pending-request list
 * @id:         identifier for matching a later completion — TODO confirm
 *              against the code that assigns it (outside this chunk)
 * @expiration: presumably a jiffies deadline after which the request is
 *              considered stale — verify against the consumer
 * @msg:        copy of the originating controlvm message
 */
struct parahotplug_request {
        struct list_head list;
        int id;
        unsigned long expiration;
        struct controlvm_message msg;
};
117
/* sysfs attribute accessors (show/store definitions, not prototypes) */
119 static ssize_t toolaction_show(struct device *dev,
120                                struct device_attribute *attr,
121                                char *buf)
122 {
123         u8 tool_action = 0;
124         int err;
125
126         err = visorchannel_read(chipset_dev->controlvm_channel,
127                                 offsetof(struct visor_controlvm_channel,
128                                          tool_action),
129                                 &tool_action, sizeof(u8));
130         if (err)
131                 return err;
132
133         return sprintf(buf, "%u\n", tool_action);
134 }
135
136 static ssize_t toolaction_store(struct device *dev,
137                                 struct device_attribute *attr,
138                                 const char *buf, size_t count)
139 {
140         u8 tool_action;
141         int err;
142
143         if (kstrtou8(buf, 10, &tool_action))
144                 return -EINVAL;
145
146         err = visorchannel_write(chipset_dev->controlvm_channel,
147                                  offsetof(struct visor_controlvm_channel,
148                                           tool_action),
149                                  &tool_action, sizeof(u8));
150         if (err)
151                 return err;
152         return count;
153 }
154 static DEVICE_ATTR_RW(toolaction);
155
156 static ssize_t boottotool_show(struct device *dev,
157                                struct device_attribute *attr,
158                                char *buf)
159 {
160         struct efi_visor_indication efi_visor_indication;
161         int err;
162
163         err = visorchannel_read(chipset_dev->controlvm_channel,
164                                 offsetof(struct visor_controlvm_channel,
165                                          efi_visor_ind),
166                                 &efi_visor_indication,
167                                 sizeof(struct efi_visor_indication));
168         if (err)
169                 return err;
170         return sprintf(buf, "%u\n", efi_visor_indication.boot_to_tool);
171 }
172
173 static ssize_t boottotool_store(struct device *dev,
174                                 struct device_attribute *attr,
175                                 const char *buf, size_t count)
176 {
177         int val, err;
178         struct efi_visor_indication efi_visor_indication;
179
180         if (kstrtoint(buf, 10, &val))
181                 return -EINVAL;
182
183         efi_visor_indication.boot_to_tool = val;
184         err = visorchannel_write(chipset_dev->controlvm_channel,
185                                  offsetof(struct visor_controlvm_channel,
186                                           efi_visor_ind),
187                                  &(efi_visor_indication),
188                                  sizeof(struct efi_visor_indication));
189         if (err)
190                 return err;
191         return count;
192 }
193 static DEVICE_ATTR_RW(boottotool);
194
195 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
196                           char *buf)
197 {
198         u32 error = 0;
199         int err;
200
201         err = visorchannel_read(chipset_dev->controlvm_channel,
202                                 offsetof(struct visor_controlvm_channel,
203                                          installation_error),
204                                 &error, sizeof(u32));
205         if (err)
206                 return err;
207         return sprintf(buf, "%u\n", error);
208 }
209
210 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
211                            const char *buf, size_t count)
212 {
213         u32 error;
214         int err;
215
216         if (kstrtou32(buf, 10, &error))
217                 return -EINVAL;
218
219         err = visorchannel_write(chipset_dev->controlvm_channel,
220                                  offsetof(struct visor_controlvm_channel,
221                                           installation_error),
222                                  &error, sizeof(u32));
223         if (err)
224                 return err;
225         return count;
226 }
227 static DEVICE_ATTR_RW(error);
228
229 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
230                            char *buf)
231 {
232         u32 text_id = 0;
233         int err;
234
235         err = visorchannel_read(chipset_dev->controlvm_channel,
236                                 offsetof(struct visor_controlvm_channel,
237                                          installation_text_id),
238                                 &text_id, sizeof(u32));
239         if (err)
240                 return err;
241
242         return sprintf(buf, "%u\n", text_id);
243 }
244
245 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
246                             const char *buf, size_t count)
247 {
248         u32 text_id;
249         int err;
250
251         if (kstrtou32(buf, 10, &text_id))
252                 return -EINVAL;
253
254         err = visorchannel_write(chipset_dev->controlvm_channel,
255                                  offsetof(struct visor_controlvm_channel,
256                                           installation_text_id),
257                                  &text_id, sizeof(u32));
258         if (err)
259                 return err;
260         return count;
261 }
262 static DEVICE_ATTR_RW(textid);
263
264 static ssize_t remaining_steps_show(struct device *dev,
265                                     struct device_attribute *attr, char *buf)
266 {
267         u16 remaining_steps = 0;
268         int err;
269
270         err = visorchannel_read(chipset_dev->controlvm_channel,
271                                 offsetof(struct visor_controlvm_channel,
272                                          installation_remaining_steps),
273                                 &remaining_steps, sizeof(u16));
274         if (err)
275                 return err;
276
277         return sprintf(buf, "%hu\n", remaining_steps);
278 }
279
280 static ssize_t remaining_steps_store(struct device *dev,
281                                      struct device_attribute *attr,
282                                      const char *buf, size_t count)
283 {
284         u16 remaining_steps;
285         int err;
286
287         if (kstrtou16(buf, 10, &remaining_steps))
288                 return -EINVAL;
289
290         err = visorchannel_write(chipset_dev->controlvm_channel,
291                                  offsetof(struct visor_controlvm_channel,
292                                           installation_remaining_steps),
293                                  &remaining_steps, sizeof(u16));
294         if (err)
295                 return err;
296         return count;
297 }
298 static DEVICE_ATTR_RW(remaining_steps);
299
300 static const guid_t *parser_id_get(struct parser_context *ctx)
301 {
302         struct visor_controlvm_parameters_header *phdr = NULL;
303
304         phdr = (struct visor_controlvm_parameters_header *)(ctx->data);
305         return &phdr->id;
306 }
307
/* Free a parser context and retire its bytes from the buffered-payload count. */
static void parser_done(struct parser_context *ctx)
{
        chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
        kfree(ctx);
}
313
314 static void *parser_string_get(struct parser_context *ctx)
315 {
316         u8 *pscan;
317         unsigned long nscan;
318         int value_length = -1;
319         void *value = NULL;
320         int i;
321
322         pscan = ctx->curr;
323         if (!pscan)
324                 return NULL;
325         nscan = ctx->bytes_remaining;
326         if (nscan == 0)
327                 return NULL;
328
329         for (i = 0, value_length = -1; i < nscan; i++)
330                 if (pscan[i] == '\0') {
331                         value_length = i;
332                         break;
333                 }
334         /* '\0' was not included in the length */
335         if (value_length < 0)
336                 value_length = nscan;
337
338         value = kmalloc(value_length + 1, GFP_KERNEL);
339         if (!value)
340                 return NULL;
341         if (value_length > 0)
342                 memcpy(value, pscan, value_length);
343         ((u8 *)(value))[value_length] = '\0';
344         return value;
345 }
346
347 static void *parser_name_get(struct parser_context *ctx)
348 {
349         struct visor_controlvm_parameters_header *phdr = NULL;
350
351         phdr = (struct visor_controlvm_parameters_header *)(ctx->data);
352
353         if (phdr->name_offset + phdr->name_length > ctx->param_bytes)
354                 return NULL;
355
356         ctx->curr = ctx->data + phdr->name_offset;
357         ctx->bytes_remaining = phdr->name_length;
358         return parser_string_get(ctx);
359 }
360
/* Bus/device number pair used as the match key for visor_device lookup. */
struct visor_busdev {
        u32 bus_no;
        u32 dev_no;
};
365
366 static int match_visorbus_dev_by_id(struct device *dev, void *data)
367 {
368         struct visor_device *vdev = to_visor_device(dev);
369         struct visor_busdev *id = data;
370
371         if ((vdev->chipset_bus_no == id->bus_no) &&
372             (vdev->chipset_dev_no == id->dev_no))
373                 return 1;
374
375         return 0;
376 }
377
378 struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
379                                                struct visor_device *from)
380 {
381         struct device *dev;
382         struct device *dev_start = NULL;
383         struct visor_device *vdev = NULL;
384         struct visor_busdev id = {
385                 .bus_no = bus_no,
386                 .dev_no = dev_no
387         };
388
389         if (from)
390                 dev_start = &from->device;
391         dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
392                               match_visorbus_dev_by_id);
393         if (dev)
394                 vdev = to_visor_device(dev);
395         return vdev;
396 }
397
398 static void controlvm_init_response(struct controlvm_message *msg,
399                                     struct controlvm_message_header *msg_hdr,
400                                     int response)
401 {
402         memset(msg, 0, sizeof(struct controlvm_message));
403         memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
404         msg->hdr.payload_bytes = 0;
405         msg->hdr.payload_vm_offset = 0;
406         msg->hdr.payload_max_bytes = 0;
407         if (response < 0) {
408                 msg->hdr.flags.failed = 1;
409                 msg->hdr.completion_status = (u32)(-response);
410         }
411 }
412
413 static int controlvm_respond_chipset_init(
414                                 struct controlvm_message_header *msg_hdr,
415                                 int response,
416                                 enum visor_chipset_feature features)
417 {
418         struct controlvm_message outmsg;
419
420         controlvm_init_response(&outmsg, msg_hdr, response);
421         outmsg.cmd.init_chipset.features = features;
422         return visorchannel_signalinsert(chipset_dev->controlvm_channel,
423                                          CONTROLVM_QUEUE_REQUEST, &outmsg);
424 }
425
/*
 * Handle the CONTROLVM init_chipset message: negotiate feature bits with
 * Command and respond. Only the first init succeeds; a repeat responds
 * with ALREADY_DONE.
 */
static int chipset_init(struct controlvm_message *inmsg)
{
        /* latched after the first init message for the life of the module */
        static int chipset_inited;
        enum visor_chipset_feature features = 0;
        int rc = CONTROLVM_RESP_SUCCESS;
        int res = 0;

        if (chipset_inited) {
                rc = -CONTROLVM_RESP_ALREADY_DONE;
                res = -EIO;
                /* skip negotiation; features stays 0 in the response */
                goto out_respond;
        }
        chipset_inited = 1;

        /*
         * Set features to indicate we support parahotplug (if Command
         * also supports it).
         */
        features = inmsg->cmd.init_chipset.features &
                   VISOR_CHIPSET_FEATURE_PARA_HOTPLUG;

        /*
         * Set the "reply" bit so Command knows this is a
         * features-aware driver.
         */
        features |= VISOR_CHIPSET_FEATURE_REPLY;

out_respond:
        /* NOTE(review): the respond result overwrites res (-EIO) — confirm intended */
        if (inmsg->hdr.flags.response_expected)
                res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);

        return res;
}
459
460 static int controlvm_respond(struct controlvm_message_header *msg_hdr,
461                              int response,
462                              struct visor_segment_state *state)
463 {
464         struct controlvm_message outmsg;
465
466         controlvm_init_response(&outmsg, msg_hdr, response);
467         if (outmsg.hdr.flags.test_message == 1)
468                 return -EINVAL;
469
470         if (state) {
471                 outmsg.cmd.device_change_state.state = *state;
472                 outmsg.cmd.device_change_state.flags.phys_device = 1;
473         }
474
475         return visorchannel_signalinsert(chipset_dev->controlvm_channel,
476                                          CONTROLVM_QUEUE_REQUEST, &outmsg);
477 }
478
/* Which saved-crash-message slot save_crash_message() writes to. */
enum crash_obj_type {
        CRASH_DEV,
        CRASH_BUS,
};
483
484 static int save_crash_message(struct controlvm_message *msg,
485                               enum crash_obj_type cr_type)
486 {
487         u32 local_crash_msg_offset;
488         u16 local_crash_msg_count;
489         int err;
490
491         err = visorchannel_read(chipset_dev->controlvm_channel,
492                                 offsetof(struct visor_controlvm_channel,
493                                          saved_crash_message_count),
494                                 &local_crash_msg_count, sizeof(u16));
495         if (err) {
496                 dev_err(&chipset_dev->acpi_device->dev,
497                         "failed to read message count\n");
498                 return err;
499         }
500
501         if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
502                 dev_err(&chipset_dev->acpi_device->dev,
503                         "invalid number of messages\n");
504                 return -EIO;
505         }
506
507         err = visorchannel_read(chipset_dev->controlvm_channel,
508                                 offsetof(struct visor_controlvm_channel,
509                                          saved_crash_message_offset),
510                                 &local_crash_msg_offset, sizeof(u32));
511         if (err) {
512                 dev_err(&chipset_dev->acpi_device->dev,
513                         "failed to read offset\n");
514                 return err;
515         }
516
517         switch (cr_type) {
518         case CRASH_DEV:
519                 local_crash_msg_offset += sizeof(struct controlvm_message);
520                 err = visorchannel_write(chipset_dev->controlvm_channel,
521                                          local_crash_msg_offset,
522                                          msg,
523                                          sizeof(struct controlvm_message));
524                 if (err) {
525                         dev_err(&chipset_dev->acpi_device->dev,
526                                 "failed to write dev msg\n");
527                         return err;
528                 }
529                 break;
530         case CRASH_BUS:
531                 err = visorchannel_write(chipset_dev->controlvm_channel,
532                                          local_crash_msg_offset,
533                                          msg,
534                                          sizeof(struct controlvm_message));
535                 if (err) {
536                         dev_err(&chipset_dev->acpi_device->dev,
537                                 "failed to write bus msg\n");
538                         return err;
539                 }
540                 break;
541         default:
542                 dev_err(&chipset_dev->acpi_device->dev,
543                         "Invalid crash_obj_type\n");
544                 break;
545         }
546         return 0;
547 }
548
549 static int controlvm_responder(enum controlvm_id cmd_id,
550                                struct controlvm_message_header *pending_msg_hdr,
551                                int response)
552 {
553         if (!pending_msg_hdr)
554                 return -EIO;
555
556         if (pending_msg_hdr->id != (u32)cmd_id)
557                 return -EINVAL;
558
559         return controlvm_respond(pending_msg_hdr, response, NULL);
560 }
561
562 static int device_changestate_responder(
563                                 enum controlvm_id cmd_id,
564                                 struct visor_device *p, int response,
565                                 struct visor_segment_state response_state)
566 {
567         struct controlvm_message outmsg;
568
569         if (!p->pending_msg_hdr)
570                 return -EIO;
571         if (p->pending_msg_hdr->id != cmd_id)
572                 return -EINVAL;
573
574         controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
575
576         outmsg.cmd.device_change_state.bus_no = p->chipset_bus_no;
577         outmsg.cmd.device_change_state.dev_no = p->chipset_dev_no;
578         outmsg.cmd.device_change_state.state = response_state;
579
580         return visorchannel_signalinsert(chipset_dev->controlvm_channel,
581                                          CONTROLVM_QUEUE_REQUEST, &outmsg);
582 }
583
/*
 * visorbus_create() - handle a CONTROLVM "create bus" message
 * @inmsg: message whose create_bus packet names the bus and its channel
 *
 * Allocates a visor_device for the new bus, maps its channel, and hands it
 * to visorbus_create_instance(), which sends the success response. On
 * failure this function unwinds its allocations and responds itself (when
 * a response is expected).
 *
 * Return: 0 on success, negative errno on failure.
 */
static int visorbus_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        struct controlvm_message_header *pmsg_hdr = NULL;
        u32 bus_no = cmd->create_bus.bus_no;
        struct visor_device *bus_info;
        struct visorchannel *visorchannel;
        int err;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (bus_info && (bus_info->state.created == 1)) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "failed %s: already exists\n", __func__);
                err = -EEXIST;
                goto err_respond;
        }

        bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
        if (!bus_info) {
                err = -ENOMEM;
                goto err_respond;
        }

        INIT_LIST_HEAD(&bus_info->list_all);
        bus_info->chipset_bus_no = bus_no;
        bus_info->chipset_dev_no = BUS_ROOT_DEVICE;

        /* the SIOVM bus create message is saved for post-crash replay */
        if (guid_equal(&cmd->create_bus.bus_inst_guid, &visor_siovm_guid)) {
                err = save_crash_message(inmsg, CRASH_BUS);
                if (err)
                        goto err_free_bus_info;
        }

        if (inmsg->hdr.flags.response_expected == 1) {
                /* stash the header so the response can be sent later */
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr),
                                   GFP_KERNEL);
                if (!pmsg_hdr) {
                        err = -ENOMEM;
                        goto err_free_bus_info;
                }

                memcpy(pmsg_hdr, &inmsg->hdr,
                       sizeof(struct controlvm_message_header));
                bus_info->pending_msg_hdr = pmsg_hdr;
        }

        visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
                                           cmd->create_bus.channel_bytes,
                                           GFP_KERNEL,
                                           &cmd->create_bus.bus_data_type_guid);
        if (!visorchannel) {
                err = -ENOMEM;
                goto err_free_pending_msg;
        }

        bus_info->visorchannel = visorchannel;

        /* Response will be handled by visorbus_create_instance on success */
        err = visorbus_create_instance(bus_info);
        if (err)
                goto err_destroy_channel;

        return 0;

err_destroy_channel:
        visorchannel_destroy(visorchannel);

err_free_pending_msg:
        /* kfree(NULL) is a no-op when no header was stashed */
        kfree(bus_info->pending_msg_hdr);

err_free_bus_info:
        kfree(bus_info);

err_respond:
        if (inmsg->hdr.flags.response_expected == 1)
                controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
        return err;
}
662
/*
 * visorbus_destroy() - handle a CONTROLVM "destroy bus" message
 * @inmsg: message naming the bus to tear down
 *
 * Validates that the bus exists, was created, and has no response still
 * outstanding; stashes the request header (when a response is expected)
 * and removes the bus. visorbus_remove_instance() sends the response.
 *
 * Return: 0 on success, negative errno on failure (after responding).
 */
static int visorbus_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        struct controlvm_message_header *pmsg_hdr = NULL;
        u32 bus_no = cmd->destroy_bus.bus_no;
        struct visor_device *bus_info;
        int err;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info) {
                err = -ENODEV;
                goto err_respond;
        }
        if (bus_info->state.created == 0) {
                err = -ENOENT;
                goto err_respond;
        }
        if (bus_info->pending_msg_hdr) {
                /* only non-NULL if dev is still waiting on a response */
                err = -EEXIST;
                goto err_respond;
        }
        if (inmsg->hdr.flags.response_expected == 1) {
                /* stash the header so the response can be sent later */
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        err = -ENOMEM;
                        goto err_respond;
                }

                memcpy(pmsg_hdr, &inmsg->hdr,
                       sizeof(struct controlvm_message_header));
                bus_info->pending_msg_hdr = pmsg_hdr;
        }

        /* Response will be handled by visorbus_remove_instance */
        visorbus_remove_instance(bus_info);
        return 0;

err_respond:
        if (inmsg->hdr.flags.response_expected == 1)
                controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
        return err;
}
706
707 static int visorbus_configure(struct controlvm_message *inmsg,
708                               struct parser_context *parser_ctx)
709 {
710         struct controlvm_message_packet *cmd = &inmsg->cmd;
711         u32 bus_no;
712         struct visor_device *bus_info;
713         int err = 0;
714
715         bus_no = cmd->configure_bus.bus_no;
716         bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
717         if (!bus_info) {
718                 err = -EINVAL;
719                 goto err_respond;
720         } else if (bus_info->state.created == 0) {
721                 err = -EINVAL;
722                 goto err_respond;
723         } else if (bus_info->pending_msg_hdr) {
724                 err = -EIO;
725                 goto err_respond;
726         }
727
728         err = visorchannel_set_clientpartition
729                 (bus_info->visorchannel,
730                  cmd->configure_bus.guest_handle);
731         if (err)
732                 goto err_respond;
733
734         if (parser_ctx) {
735                 const guid_t *partition_guid = parser_id_get(parser_ctx);
736
737                 guid_copy(&bus_info->partition_guid, partition_guid);
738                 bus_info->name = parser_name_get(parser_ctx);
739         }
740
741         if (inmsg->hdr.flags.response_expected == 1)
742                 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
743         return 0;
744
745 err_respond:
746         dev_err(&chipset_dev->acpi_device->dev,
747                 "%s exited with err: %d\n", __func__, err);
748         if (inmsg->hdr.flags.response_expected == 1)
749                 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
750         return err;
751 }
752
/*
 * visorbus_device_create() - handle a CONTROLVM "create device" message
 * @inmsg: message whose create_device packet names the bus/dev and channel
 *
 * Validates the owning bus, allocates a visor_device for the new device,
 * maps its channel, saves the message for crash replay when it is a vHBA,
 * and hands the device to create_visor_device(), which sends the success
 * response. On failure this function unwinds and responds itself (when a
 * response is expected).
 *
 * Return: 0 on success, negative errno on failure.
 */
static int visorbus_device_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        struct controlvm_message_header *pmsg_hdr = NULL;
        u32 bus_no = cmd->create_device.bus_no;
        u32 dev_no = cmd->create_device.dev_no;
        struct visor_device *dev_info = NULL;
        struct visor_device *bus_info;
        struct visorchannel *visorchannel;
        int err;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "failed to get bus by id: %d\n", bus_no);
                err = -ENODEV;
                goto err_respond;
        }
        if (bus_info->state.created == 0) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "bus not created, id: %d\n", bus_no);
                err = -EINVAL;
                goto err_respond;
        }

        /* reject a duplicate create for an already-created device */
        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (dev_info && (dev_info->state.created == 1)) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "failed to get bus by id: %d/%d\n", bus_no, dev_no);
                err = -EEXIST;
                goto err_respond;
        }

        dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
        if (!dev_info) {
                err = -ENOMEM;
                goto err_respond;
        }

        dev_info->chipset_bus_no = bus_no;
        dev_info->chipset_dev_no = dev_no;
        guid_copy(&dev_info->inst, &cmd->create_device.dev_inst_guid);

        /* not sure where the best place to set the 'parent' */
        dev_info->device.parent = &bus_info->device;

        visorchannel =
               visorchannel_create_with_lock(cmd->create_device.channel_addr,
                                             cmd->create_device.channel_bytes,
                                             GFP_KERNEL,
                                             &cmd->create_device.data_type_guid);
        if (!visorchannel) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "failed to create visorchannel: %d/%d\n",
                        bus_no, dev_no);
                err = -ENOMEM;
                goto err_free_dev_info;
        }
        dev_info->visorchannel = visorchannel;
        guid_copy(&dev_info->channel_type_guid, &cmd->create_device.data_type_guid);
        /* vHBA create messages are saved for post-crash replay */
        if (guid_equal(&cmd->create_device.data_type_guid, &visor_vhba_channel_guid)) {
                err = save_crash_message(inmsg, CRASH_DEV);
                if (err)
                        goto err_destroy_visorchannel;
        }

        if (inmsg->hdr.flags.response_expected == 1) {
                /* stash the header so the response can be sent later */
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        err = -ENOMEM;
                        goto err_destroy_visorchannel;
                }

                memcpy(pmsg_hdr, &inmsg->hdr,
                       sizeof(struct controlvm_message_header));
                dev_info->pending_msg_hdr = pmsg_hdr;
        }
        /* create_visor_device will send response */
        err = create_visor_device(dev_info);
        if (err)
                goto err_destroy_visorchannel;

        return 0;

err_destroy_visorchannel:
        visorchannel_destroy(visorchannel);

err_free_dev_info:
        kfree(dev_info);

err_respond:
        if (inmsg->hdr.flags.response_expected == 1)
                controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
        return err;
}
848
/*
 * visorbus_device_changestate() - handle a CONTROLVM_DEVICE_CHANGESTATE
 *                                 message for a virtual (non-physical) device
 * @inmsg: CONTROLVM message carrying the bus/dev numbers and target state
 *
 * Looks up the device, checks that it exists, has been created, and has no
 * response still outstanding, then kicks off a resume or pause depending on
 * the requested segment state. On success the response is sent later,
 * asynchronously, by visorchipset_device_resume()/visorchipset_device_pause()
 * using the header saved in dev_info->pending_msg_hdr.
 *
 * Return: 0 on success, negative errno on failure (an error response is sent
 *         from here when one was expected).
 */
static int visorbus_device_changestate(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        struct controlvm_message_header *pmsg_hdr = NULL;
        u32 bus_no = cmd->device_change_state.bus_no;
        u32 dev_no = cmd->device_change_state.dev_no;
        struct visor_segment_state state = cmd->device_change_state.state;
        struct visor_device *dev_info;
        int err = 0;

        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (!dev_info) {
                err = -ENODEV;
                goto err_respond;
        }
        if (dev_info->state.created == 0) {
                err = -EINVAL;
                goto err_respond;
        }
        if (dev_info->pending_msg_hdr) {
                /* only non-NULL if dev is still waiting on a response */
                err = -EIO;
                goto err_respond;
        }
        if (inmsg->hdr.flags.response_expected == 1) {
                /* saved header is used for the asynchronous response later */
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        err = -ENOMEM;
                        goto err_respond;
                }

                memcpy(pmsg_hdr, &inmsg->hdr,
                       sizeof(struct controlvm_message_header));
                dev_info->pending_msg_hdr = pmsg_hdr;
        }

        if (state.alive == segment_state_running.alive &&
            state.operating == segment_state_running.operating)
                /* Response will be sent from visorchipset_device_resume */
                err = visorchipset_device_resume(dev_info);
        /* ServerNotReady / ServerLost / SegmentStateStandby */
        else if (state.alive == segment_state_standby.alive &&
                 state.operating == segment_state_standby.operating)
                /*
                 * technically this is standby case where server is lost.
                 * Response will be sent from visorchipset_device_pause.
                 */
                err = visorchipset_device_pause(dev_info);
        /*
         * NOTE(review): if the requested state matches neither running nor
         * standby, err stays 0 and we return success with pending_msg_hdr
         * still set and no response ever sent — confirm this is intended.
         * Also, on the err_respond path below pmsg_hdr may already be
         * attached to dev_info->pending_msg_hdr and is not freed/cleared
         * before a second response is sent — looks like a leak/stale
         * pointer; verify against the async responder paths.
         */
        if (err)
                goto err_respond;

        return 0;

err_respond:
        dev_err(&chipset_dev->acpi_device->dev, "failed: %d\n", err);
        if (inmsg->hdr.flags.response_expected == 1)
                controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
        return err;
}
908
909 static int visorbus_device_destroy(struct controlvm_message *inmsg)
910 {
911         struct controlvm_message_packet *cmd = &inmsg->cmd;
912         struct controlvm_message_header *pmsg_hdr = NULL;
913         u32 bus_no = cmd->destroy_device.bus_no;
914         u32 dev_no = cmd->destroy_device.dev_no;
915         struct visor_device *dev_info;
916         int err;
917
918         dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
919         if (!dev_info) {
920                 err = -ENODEV;
921                 goto err_respond;
922         }
923         if (dev_info->state.created == 0) {
924                 err = -EINVAL;
925                 goto err_respond;
926         }
927         if (dev_info->pending_msg_hdr) {
928                 /* only non-NULL if dev is still waiting on a response */
929                 err = -EIO;
930                 goto err_respond;
931         }
932         if (inmsg->hdr.flags.response_expected == 1) {
933                 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
934                 if (!pmsg_hdr) {
935                         err = -ENOMEM;
936                         goto err_respond;
937                 }
938
939                 memcpy(pmsg_hdr, &inmsg->hdr,
940                        sizeof(struct controlvm_message_header));
941                 dev_info->pending_msg_hdr = pmsg_hdr;
942         }
943
944         kfree(dev_info->name);
945         remove_visor_device(dev_info);
946         return 0;
947
948 err_respond:
949         if (inmsg->hdr.flags.response_expected == 1)
950                 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
951         return err;
952 }
953
954 /*
955  * The general parahotplug flow works as follows. The visorchipset receives
956  * a DEVICE_CHANGESTATE message from Command specifying a physical device
957  * to enable or disable. The CONTROLVM message handler calls
958  * parahotplug_process_message, which then adds the message to a global list
959  * and kicks off a udev event which causes a user level script to enable or
960  * disable the specified device. The udev script then writes to
961  * /sys/devices/platform/visorchipset/parahotplug, which causes the
962  * parahotplug store functions to get called, at which point the
963  * appropriate CONTROLVM message is retrieved from the list and responded to.
964  */
965
966 #define PARAHOTPLUG_TIMEOUT_MS 2000
967
968 /*
969  * parahotplug_next_id() - generate unique int to match an outstanding
970  *                         CONTROLVM message with a udev script /sys
971  *                         response
972  *
973  * Return: a unique integer value
974  */
975 static int parahotplug_next_id(void)
976 {
977         static atomic_t id = ATOMIC_INIT(0);
978
979         return atomic_inc_return(&id);
980 }
981
982 /*
983  * parahotplug_next_expiration() - returns the time (in jiffies) when a
984  *                                 CONTROLVM message on the list should expire
985  *                                 -- PARAHOTPLUG_TIMEOUT_MS in the future
986  *
987  * Return: expected expiration time (in jiffies)
988  */
989 static unsigned long parahotplug_next_expiration(void)
990 {
991         return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
992 }
993
994 /*
995  * parahotplug_request_create() - create a parahotplug_request, which is
996  *                                basically a wrapper for a CONTROLVM_MESSAGE
997  *                                that we can stick on a list
998  * @msg: the message to insert in the request
999  *
1000  * Return: the request containing the provided message
1001  */
1002 static struct parahotplug_request *parahotplug_request_create(
1003                                                 struct controlvm_message *msg)
1004 {
1005         struct parahotplug_request *req;
1006
1007         req = kmalloc(sizeof(*req), GFP_KERNEL);
1008         if (!req)
1009                 return NULL;
1010
1011         req->id = parahotplug_next_id();
1012         req->expiration = parahotplug_next_expiration();
1013         req->msg = *msg;
1014
1015         return req;
1016 }
1017
1018 /*
1019  * parahotplug_request_destroy() - free a parahotplug_request
1020  * @req: the request to deallocate
1021  */
1022 static void parahotplug_request_destroy(struct parahotplug_request *req)
1023 {
1024         kfree(req);
1025 }
1026
static LIST_HEAD(parahotplug_request_list);
/* protects parahotplug_request_list */
static DEFINE_SPINLOCK(parahotplug_request_list_lock);
1030
1031 /*
1032  * parahotplug_request_complete() - mark request as complete
1033  * @id:     the id of the request
1034  * @active: indicates whether the request is assigned to active partition
1035  *
1036  * Called from the /sys handler, which means the user script has
1037  * finished the enable/disable. Find the matching identifier, and
1038  * respond to the CONTROLVM message with success.
1039  *
1040  * Return: 0 on success or -EINVAL on failure
1041  */
1042 static int parahotplug_request_complete(int id, u16 active)
1043 {
1044         struct list_head *pos;
1045         struct list_head *tmp;
1046
1047         spin_lock(&parahotplug_request_list_lock);
1048
1049         /* Look for a request matching "id". */
1050         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1051                 struct parahotplug_request *req =
1052                     list_entry(pos, struct parahotplug_request, list);
1053                 if (req->id == id) {
1054                         /*
1055                          * Found a match. Remove it from the list and
1056                          * respond.
1057                          */
1058                         list_del(pos);
1059                         spin_unlock(&parahotplug_request_list_lock);
1060                         req->msg.cmd.device_change_state.state.active = active;
1061                         if (req->msg.hdr.flags.response_expected)
1062                                 controlvm_respond(
1063                                        &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1064                                        &req->msg.cmd.device_change_state.state);
1065                         parahotplug_request_destroy(req);
1066                         return 0;
1067                 }
1068         }
1069
1070         spin_unlock(&parahotplug_request_list_lock);
1071         return -EINVAL;
1072 }
1073
1074 /*
1075  * devicedisabled_store() - disables the hotplug device
1076  * @dev:   sysfs interface variable not utilized in this function
1077  * @attr:  sysfs interface variable not utilized in this function
1078  * @buf:   buffer containing the device id
1079  * @count: the size of the buffer
1080  *
1081  * The parahotplug/devicedisabled interface gets called by our support script
1082  * when an SR-IOV device has been shut down. The ID is passed to the script
1083  * and then passed back when the device has been removed.
1084  *
1085  * Return: the size of the buffer for success or negative for error
1086  */
1087 static ssize_t devicedisabled_store(struct device *dev,
1088                                     struct device_attribute *attr,
1089                                     const char *buf, size_t count)
1090 {
1091         unsigned int id;
1092         int err;
1093
1094         if (kstrtouint(buf, 10, &id))
1095                 return -EINVAL;
1096
1097         err = parahotplug_request_complete(id, 0);
1098         if (err < 0)
1099                 return err;
1100         return count;
1101 }
1102 static DEVICE_ATTR_WO(devicedisabled);
1103
1104 /*
1105  * deviceenabled_store() - enables the hotplug device
1106  * @dev:   sysfs interface variable not utilized in this function
1107  * @attr:  sysfs interface variable not utilized in this function
1108  * @buf:   buffer containing the device id
1109  * @count: the size of the buffer
1110  *
1111  * The parahotplug/deviceenabled interface gets called by our support script
1112  * when an SR-IOV device has been recovered. The ID is passed to the script
1113  * and then passed back when the device has been brought back up.
1114  *
1115  * Return: the size of the buffer for success or negative for error
1116  */
static ssize_t deviceenabled_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        unsigned int id;

        if (kstrtouint(buf, 10, &id))
                return -EINVAL;

        /*
         * The return value is deliberately ignored: enable requests are
         * responded to and destroyed immediately in
         * parahotplug_process_message() without being queued, so no matching
         * request is expected to still be on the list by the time the
         * script writes here.
         */
        parahotplug_request_complete(id, 1);
        return count;
}
1129 static DEVICE_ATTR_WO(deviceenabled);
1130
/* attributes exposed in the "install" sysfs group */
static struct attribute *visorchipset_install_attrs[] = {
        &dev_attr_toolaction.attr,
        &dev_attr_boottotool.attr,
        &dev_attr_error.attr,
        &dev_attr_textid.attr,
        &dev_attr_remaining_steps.attr,
        NULL
};

static const struct attribute_group visorchipset_install_group = {
        .name = "install",
        .attrs = visorchipset_install_attrs
};

/* attributes written by the parahotplug udev support script */
static struct attribute *visorchipset_parahotplug_attrs[] = {
        &dev_attr_devicedisabled.attr,
        &dev_attr_deviceenabled.attr,
        NULL
};

static const struct attribute_group visorchipset_parahotplug_group = {
        .name = "parahotplug",
        .attrs = visorchipset_parahotplug_attrs
};

/* all sysfs attribute groups registered for the visorchipset device */
static const struct attribute_group *visorchipset_dev_groups[] = {
        &visorchipset_install_group,
        &visorchipset_parahotplug_group,
        NULL
};
1161
1162 /*
1163  * parahotplug_request_kickoff() - initiate parahotplug request
1164  * @req: the request to initiate
1165  *
1166  * Cause uevent to run the user level script to do the disable/enable specified
1167  * in the parahotplug_request.
1168  */
1169 static int parahotplug_request_kickoff(struct parahotplug_request *req)
1170 {
1171         struct controlvm_message_packet *cmd = &req->msg.cmd;
1172         char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1173             env_func[40];
1174         char *envp[] = {
1175                 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1176         };
1177
1178         sprintf(env_cmd, "VISOR_PARAHOTPLUG=1");
1179         sprintf(env_id, "VISOR_PARAHOTPLUG_ID=%d", req->id);
1180         sprintf(env_state, "VISOR_PARAHOTPLUG_STATE=%d",
1181                 cmd->device_change_state.state.active);
1182         sprintf(env_bus, "VISOR_PARAHOTPLUG_BUS=%d",
1183                 cmd->device_change_state.bus_no);
1184         sprintf(env_dev, "VISOR_PARAHOTPLUG_DEVICE=%d",
1185                 cmd->device_change_state.dev_no >> 3);
1186         sprintf(env_func, "VISOR_PARAHOTPLUG_FUNCTION=%d",
1187                 cmd->device_change_state.dev_no & 0x7);
1188
1189         return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1190                                   KOBJ_CHANGE, envp);
1191 }
1192
1193 /*
1194  * parahotplug_process_message() - enables or disables a PCI device by kicking
1195  *                                 off a udev script
1196  * @inmsg: the message indicating whether to enable or disable
1197  */
1198 static int parahotplug_process_message(struct controlvm_message *inmsg)
1199 {
1200         struct parahotplug_request *req;
1201         int err;
1202
1203         req = parahotplug_request_create(inmsg);
1204         if (!req)
1205                 return -ENOMEM;
1206
1207         /*
1208          * For enable messages, just respond with success right away, we don't
1209          * need to wait to see if the enable was successful.
1210          */
1211         if (inmsg->cmd.device_change_state.state.active) {
1212                 err = parahotplug_request_kickoff(req);
1213                 if (err)
1214                         goto err_respond;
1215                 controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS,
1216                                   &inmsg->cmd.device_change_state.state);
1217                 parahotplug_request_destroy(req);
1218                 return 0;
1219         }
1220
1221         /*
1222          * For disable messages, add the request to the
1223          * request list before kicking off the udev script. It
1224          * won't get responded to until the script has
1225          * indicated it's done.
1226          */
1227         spin_lock(&parahotplug_request_list_lock);
1228         list_add_tail(&req->list, &parahotplug_request_list);
1229         spin_unlock(&parahotplug_request_list_lock);
1230
1231         err = parahotplug_request_kickoff(req);
1232         if (err)
1233                 goto err_respond;
1234         return 0;
1235
1236 err_respond:
1237         controlvm_respond(&inmsg->hdr, err,
1238                           &inmsg->cmd.device_change_state.state);
1239         return err;
1240 }
1241
1242 /*
1243  * chipset_ready_uevent() - sends chipset_ready action
1244  *
1245  * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1246  *
1247  * Return: 0 on success, negative on failure
1248  */
1249 static int chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
1250 {
1251         int res;
1252
1253         res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
1254                              KOBJ_ONLINE);
1255
1256         if (msg_hdr->flags.response_expected)
1257                 controlvm_respond(msg_hdr, res, NULL);
1258
1259         return res;
1260 }
1261
1262 /*
1263  * chipset_selftest_uevent() - sends chipset_selftest action
1264  *
1265  * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1266  *
1267  * Return: 0 on success, negative on failure
1268  */
1269 static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
1270 {
1271         char env_selftest[20];
1272         char *envp[] = { env_selftest, NULL };
1273         int res;
1274
1275         sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1276         res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1277                                  KOBJ_CHANGE, envp);
1278
1279         if (msg_hdr->flags.response_expected)
1280                 controlvm_respond(msg_hdr, res, NULL);
1281
1282         return res;
1283 }
1284
1285 /*
1286  * chipset_notready_uevent() - sends chipset_notready action
1287  *
1288  * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1289  *
1290  * Return: 0 on success, negative on failure
1291  */
1292 static int chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
1293 {
1294         int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
1295                              KOBJ_OFFLINE);
1296
1297         if (msg_hdr->flags.response_expected)
1298                 controlvm_respond(msg_hdr, res, NULL);
1299
1300         return res;
1301 }
1302
/*
 * unisys_vmcall() - issue a VMCALL to the s-Par hypervisor
 * @tuple: selects the vmcall service requested (passed in EAX)
 * @param: 64-bit parameter, split into EBX (low 32 bits) and ECX
 *         (high 32 bits)
 *
 * Return: 0 on success, negative errno on failure (-EPERM when not running
 *         under a hypervisor, otherwise the translated VMCALL result)
 */
static int unisys_vmcall(unsigned long tuple, unsigned long param)
{
        int result = 0;
        unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
        unsigned long reg_ebx;
        unsigned long reg_ecx;

        reg_ebx = param & 0xFFFFFFFF;
        reg_ecx = param >> 32;

        /* CPUID leaf 1, ECX bit 31: hypervisor-present flag */
        cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
        if (!(cpuid_ecx & 0x80000000))
                return -EPERM;

        /* raw opcode bytes 0f 01 c1 == the VMCALL instruction */
        __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
                "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
        if (result)
                goto error;

        return 0;
/* Need to convert from VMCALL error codes to Linux */
error:
        switch (result) {
        case VMCALL_RESULT_INVALID_PARAM:
                return -EINVAL;
        case VMCALL_RESULT_DATA_UNAVAILABLE:
                return -ENODEV;
        default:
                return -EFAULT;
        }
}
1334
1335 static unsigned int issue_vmcall_io_controlvm_addr(u64 *control_addr,
1336                                                    u32 *control_bytes)
1337 {
1338         u64 physaddr;
1339         int err;
1340
1341         physaddr = virt_to_phys(&chipset_dev->controlvm_params);
1342         err = unisys_vmcall(VMCALL_CONTROLVM_ADDR, physaddr);
1343         if (err)
1344                 return err;
1345
1346         *control_addr = chipset_dev->controlvm_params.address;
1347         *control_bytes = chipset_dev->controlvm_params.channel_bytes;
1348
1349         return 0;
1350 }
1351
1352 static u64 controlvm_get_channel_address(void)
1353 {
1354         u64 addr = 0;
1355         u32 size = 0;
1356
1357         if (issue_vmcall_io_controlvm_addr(&addr, &size))
1358                 return 0;
1359
1360         return addr;
1361 }
1362
/*
 * setup_crash_devices_work_queue() - restore the storage bus/device saved
 *                                    in the controlvm channel after a crash
 * @work: workqueue context (unused; required by the work_func_t signature)
 *
 * Re-initializes the chipset, then reads the saved crash messages out of the
 * controlvm channel and replays the create-bus and create-device messages so
 * the crash-dump storage device comes back up. Any failure is logged and the
 * function simply returns.
 */
static void setup_crash_devices_work_queue(struct work_struct *work)
{
        struct controlvm_message local_crash_bus_msg;
        struct controlvm_message local_crash_dev_msg;
        struct controlvm_message msg;
        u32 local_crash_msg_offset;
        u16 local_crash_msg_count;

        /*
         * send init chipset msg
         * NOTE(review): msg is only partially initialized (hdr.id,
         * bus_count, switch_count); presumably chipset_init() reads only
         * those fields — confirm.
         */
        msg.hdr.id = CONTROLVM_CHIPSET_INIT;
        msg.cmd.init_chipset.bus_count = 23;
        msg.cmd.init_chipset.switch_count = 0;

        chipset_init(&msg);

        /* get saved message count */
        if (visorchannel_read(chipset_dev->controlvm_channel,
                              offsetof(struct visor_controlvm_channel,
                                       saved_crash_message_count),
                              &local_crash_msg_count, sizeof(u16)) < 0) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "failed to read channel\n");
                return;
        }

        /* the channel must hold exactly CONTROLVM_CRASHMSG_MAX messages */
        if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "invalid count\n");
                return;
        }

        /* get saved crash message offset */
        if (visorchannel_read(chipset_dev->controlvm_channel,
                              offsetof(struct visor_controlvm_channel,
                                       saved_crash_message_offset),
                              &local_crash_msg_offset, sizeof(u32)) < 0) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "failed to read channel\n");
                return;
        }

        /* read create device message for storage bus offset */
        if (visorchannel_read(chipset_dev->controlvm_channel,
                              local_crash_msg_offset,
                              &local_crash_bus_msg,
                              sizeof(struct controlvm_message)) < 0) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "failed to read channel\n");
                return;
        }

        /* read create device message for storage device */
        if (visorchannel_read(chipset_dev->controlvm_channel,
                              local_crash_msg_offset +
                              sizeof(struct controlvm_message),
                              &local_crash_dev_msg,
                              sizeof(struct controlvm_message)) < 0) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "failed to read channel\n");
                return;
        }

        /* reuse IOVM create bus message */
        if (!local_crash_bus_msg.cmd.create_bus.channel_addr) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "no valid create_bus message\n");
                return;
        }
        visorbus_create(&local_crash_bus_msg);

        /* reuse create device message for storage device */
        if (!local_crash_dev_msg.cmd.create_device.channel_addr) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "no valid create_device message\n");
                return;
        }
        visorbus_device_create(&local_crash_dev_msg);
}
1441
1442 void visorbus_response(struct visor_device *bus_info, int response,
1443                        int controlvm_id)
1444 {
1445         controlvm_responder(controlvm_id, bus_info->pending_msg_hdr, response);
1446
1447         kfree(bus_info->pending_msg_hdr);
1448         bus_info->pending_msg_hdr = NULL;
1449 }
1450
1451 void visorbus_device_changestate_response(struct visor_device *dev_info,
1452                                           int response,
1453                                           struct visor_segment_state state)
1454 {
1455         device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
1456                                      dev_info, response, state);
1457
1458         kfree(dev_info->pending_msg_hdr);
1459         dev_info->pending_msg_hdr = NULL;
1460 }
1461
1462 static struct parser_context *parser_init_byte_stream(u64 addr, u32 bytes,
1463                                                       bool *retry)
1464 {
1465         int allocbytes = sizeof(struct parser_context) + bytes;
1466         struct parser_context *ctx;
1467         void *mapping;
1468
1469         *retry = false;
1470
1471         /*
1472          * alloc an 0 extra byte to ensure payload is
1473          * '\0'-terminated
1474          */
1475         allocbytes++;
1476         if ((chipset_dev->controlvm_payload_bytes_buffered + bytes)
1477             > MAX_CONTROLVM_PAYLOAD_BYTES) {
1478                 *retry = true;
1479                 return NULL;
1480         }
1481         ctx = kzalloc(allocbytes, GFP_KERNEL);
1482         if (!ctx) {
1483                 *retry = true;
1484                 return NULL;
1485         }
1486
1487         ctx->allocbytes = allocbytes;
1488         ctx->param_bytes = bytes;
1489         ctx->curr = NULL;
1490         ctx->bytes_remaining = 0;
1491         ctx->byte_stream = false;
1492         mapping = memremap(addr, bytes, MEMREMAP_WB);
1493         if (!mapping)
1494                 goto err_finish_ctx;
1495         memcpy(ctx->data, mapping, bytes);
1496         memunmap(mapping);
1497         ctx->byte_stream = true;
1498         chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
1499
1500         return ctx;
1501
1502 err_finish_ctx:
1503         kfree(ctx);
1504         return NULL;
1505 }
1506
1507 /*
1508  * handle_command() - process a controlvm message
1509  * @inmsg:        the message to process
1510  * @channel_addr: address of the controlvm channel
1511  *
1512  * Return:
1513  *      0       - Successfully processed the message
1514  *      -EAGAIN - ControlVM message was not processed and should be retried
1515  *                reading the next controlvm message; a scenario where this can
1516  *                occur is when we need to throttle the allocation of memory in
1517  *                which to copy out controlvm payload data.
1518  *      < 0     - error: ControlVM message was processed but an error occurred.
1519  */
1520 static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
1521 {
1522         struct controlvm_message_packet *cmd = &inmsg.cmd;
1523         u64 parm_addr;
1524         u32 parm_bytes;
1525         struct parser_context *parser_ctx = NULL;
1526         struct controlvm_message ackmsg;
1527         int err = 0;
1528
1529         /* create parsing context if necessary */
1530         parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1531         parm_bytes = inmsg.hdr.payload_bytes;
1532
1533         /*
1534          * Parameter and channel addresses within test messages actually lie
1535          * within our OS-controlled memory. We need to know that, because it
1536          * makes a difference in how we compute the virtual address.
1537          */
1538         if (parm_addr && parm_bytes) {
1539                 bool retry = false;
1540
1541                 parser_ctx =
1542                     parser_init_byte_stream(parm_addr, parm_bytes, &retry);
1543                 if (!parser_ctx && retry)
1544                         return -EAGAIN;
1545         }
1546         controlvm_init_response(&ackmsg, &inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1547         err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
1548                                         CONTROLVM_QUEUE_ACK, &ackmsg);
1549         if (err)
1550                 return err;
1551
1552         switch (inmsg.hdr.id) {
1553         case CONTROLVM_CHIPSET_INIT:
1554                 err = chipset_init(&inmsg);
1555                 break;
1556         case CONTROLVM_BUS_CREATE:
1557                 err = visorbus_create(&inmsg);
1558                 break;
1559         case CONTROLVM_BUS_DESTROY:
1560                 err = visorbus_destroy(&inmsg);
1561                 break;
1562         case CONTROLVM_BUS_CONFIGURE:
1563                 err = visorbus_configure(&inmsg, parser_ctx);
1564                 break;
1565         case CONTROLVM_DEVICE_CREATE:
1566                 err = visorbus_device_create(&inmsg);
1567                 break;
1568         case CONTROLVM_DEVICE_CHANGESTATE:
1569                 if (cmd->device_change_state.flags.phys_device) {
1570                         err = parahotplug_process_message(&inmsg);
1571                 } else {
1572                         /*
1573                          * save the hdr and cmd structures for later use
1574                          * when sending back the response to Command
1575                          */
1576                         err = visorbus_device_changestate(&inmsg);
1577                         break;
1578                 }
1579                 break;
1580         case CONTROLVM_DEVICE_DESTROY:
1581                 err = visorbus_device_destroy(&inmsg);
1582                 break;
1583         case CONTROLVM_DEVICE_CONFIGURE:
1584                 /* no op just send a respond that we passed */
1585                 if (inmsg.hdr.flags.response_expected)
1586                         controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS,
1587                                           NULL);
1588                 break;
1589         case CONTROLVM_CHIPSET_READY:
1590                 err = chipset_ready_uevent(&inmsg.hdr);
1591                 break;
1592         case CONTROLVM_CHIPSET_SELFTEST:
1593                 err = chipset_selftest_uevent(&inmsg.hdr);
1594                 break;
1595         case CONTROLVM_CHIPSET_STOP:
1596                 err = chipset_notready_uevent(&inmsg.hdr);
1597                 break;
1598         default:
1599                 err = -ENOMSG;
1600                 if (inmsg.hdr.flags.response_expected)
1601                         controlvm_respond(&inmsg.hdr,
1602                                           -CONTROLVM_RESP_ID_UNKNOWN, NULL);
1603                 break;
1604         }
1605
1606         if (parser_ctx) {
1607                 parser_done(parser_ctx);
1608                 parser_ctx = NULL;
1609         }
1610         return err;
1611 }
1612
1613 /*
1614  * read_controlvm_event() - retreives the next message from the
1615  *                          CONTROLVM_QUEUE_EVENT queue in the controlvm
1616  *                          channel
1617  * @msg: pointer to the retrieved message
1618  *
1619  * Return: 0 if valid message was retrieved or -error
1620  */
1621 static int read_controlvm_event(struct controlvm_message *msg)
1622 {
1623         int err = visorchannel_signalremove(chipset_dev->controlvm_channel,
1624                                         CONTROLVM_QUEUE_EVENT, msg);
1625         if (err)
1626                 return err;
1627
1628         /* got a message */
1629         if (msg->hdr.flags.test_message == 1)
1630                 return -EINVAL;
1631
1632         return 0;
1633 }
1634
1635 /*
1636  * parahotplug_process_list() - remove any request from the list that's been on
1637  *                              there too long and respond with an error
1638  */
1639 static void parahotplug_process_list(void)
1640 {
1641         struct list_head *pos;
1642         struct list_head *tmp;
1643
1644         spin_lock(&parahotplug_request_list_lock);
1645
1646         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1647                 struct parahotplug_request *req =
1648                     list_entry(pos, struct parahotplug_request, list);
1649
1650                 if (!time_after_eq(jiffies, req->expiration))
1651                         continue;
1652
1653                 list_del(pos);
1654                 if (req->msg.hdr.flags.response_expected)
1655                         controlvm_respond(
1656                                 &req->msg.hdr,
1657                                 CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
1658                                 &req->msg.cmd.device_change_state.state);
1659                 parahotplug_request_destroy(req);
1660         }
1661
1662         spin_unlock(&parahotplug_request_list_lock);
1663 }
1664
/*
 * controlvm_periodic_work() - periodically poll the controlvm channel for
 *                             responses and new event messages
 * @work: work_struct embedded in chipset_dev->periodic_controlvm_work
 *
 * Drains the RESPONSE queue, then processes EVENT messages — resuming a
 * previously throttled message first, if one is pending.  Finally it
 * reschedules itself with a fast or slow polling interval depending on how
 * recently a message was seen.
 */
static void controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	int count = 0;
	int err;

	/* Drain the RESPONSE queue to make it empty */
	do {
		err = visorchannel_signalremove(chipset_dev->controlvm_channel,
						CONTROLVM_QUEUE_RESPONSE,
						&inmsg);
	} while ((!err) && (++count < CONTROLVM_MESSAGE_MAX));

	/*
	 * NOTE(review): -EAGAIN appears to be the "queue drained" result from
	 * visorchannel_signalremove() — confirm; any other error skips event
	 * processing for this pass.
	 */
	if (err != -EAGAIN)
		goto schedule_out;

	if (chipset_dev->controlvm_pending_msg_valid) {
		/*
		 * we throttled processing of a prior
		 * msg, so try to process it again
		 * rather than reading a new one
		 */
		inmsg = chipset_dev->controlvm_pending_msg;
		chipset_dev->controlvm_pending_msg_valid = false;
		err = 0;
	} else {
		err = read_controlvm_event(&inmsg);
	}

	while (!err) {
		chipset_dev->most_recent_message_jiffies = jiffies;
		err = handle_command(inmsg,
				     visorchannel_get_physaddr
				     (chipset_dev->controlvm_channel));
		if (err == -EAGAIN) {
			/* throttled: stash the message and retry next pass */
			chipset_dev->controlvm_pending_msg = inmsg;
			chipset_dev->controlvm_pending_msg_valid = true;
			break;
		}

		err = read_controlvm_event(&inmsg);
	}

	/* parahotplug_worker: time out stale hotplug requests */
	parahotplug_process_list();

/*
 * The controlvm messages are sent in bulk. If we start receiving messages, we
 * want the polling to be fast. If we do not receive any message for
 * MIN_IDLE_SECONDS, we can slow down the polling.
 */
schedule_out:
	if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
				(HZ * MIN_IDLE_SECONDS))) {
		/*
		 * it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (chipset_dev->poll_jiffies !=
					      POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			chipset_dev->poll_jiffies =
					      POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		if (chipset_dev->poll_jiffies !=
					      POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			chipset_dev->poll_jiffies =
					      POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}

	schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
			      chipset_dev->poll_jiffies);
}
1738
1739 static int visorchipset_init(struct acpi_device *acpi_device)
1740 {
1741         int err = -ENODEV;
1742         u64 addr;
1743         struct visorchannel *controlvm_channel;
1744
1745         chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
1746         if (!chipset_dev)
1747                 goto error;
1748
1749         addr = controlvm_get_channel_address();
1750         if (!addr)
1751                 goto error;
1752
1753         acpi_device->driver_data = chipset_dev;
1754         chipset_dev->acpi_device = acpi_device;
1755         chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1756         controlvm_channel = visorchannel_create_with_lock(addr, 0, GFP_KERNEL,
1757                                                 &visor_controlvm_channel_guid);
1758         if (!controlvm_channel)
1759                 goto error_free_chipset_dev;
1760
1761         chipset_dev->controlvm_channel = controlvm_channel;
1762
1763         err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
1764                                   visorchipset_dev_groups);
1765         if (err < 0)
1766                 goto error_destroy_channel;
1767
1768         if (!visor_check_channel(visorchannel_get_header(controlvm_channel),
1769                                  &visor_controlvm_channel_guid,
1770                                  "controlvm",
1771                                  sizeof(struct visor_controlvm_channel),
1772                                  VISOR_CONTROLVM_CHANNEL_VERSIONID,
1773                                  VISOR_CHANNEL_SIGNATURE))
1774                 goto error_delete_groups;
1775
1776         /* if booting in a crash kernel */
1777         if (is_kdump_kernel())
1778                 INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
1779                                   setup_crash_devices_work_queue);
1780         else
1781                 INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
1782                                   controlvm_periodic_work);
1783
1784         chipset_dev->most_recent_message_jiffies = jiffies;
1785         chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1786         schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
1787                               chipset_dev->poll_jiffies);
1788
1789         err = visorbus_init();
1790         if (err < 0)
1791                 goto error_cancel_work;
1792
1793         return 0;
1794
1795 error_cancel_work:
1796         cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
1797
1798 error_delete_groups:
1799         sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
1800                             visorchipset_dev_groups);
1801
1802 error_destroy_channel:
1803         visorchannel_destroy(chipset_dev->controlvm_channel);
1804
1805 error_free_chipset_dev:
1806         kfree(chipset_dev);
1807
1808 error:
1809         dev_err(&acpi_device->dev, "failed with error %d\n", err);
1810         return err;
1811 }
1812
/*
 * visorchipset_exit() - ACPI .remove callback: tear down in reverse order of
 *                       visorchipset_init()
 * @acpi_device: the ACPI device being removed (unused; state lives in the
 *               global chipset_dev)
 *
 * Return: always 0
 */
static int visorchipset_exit(struct acpi_device *acpi_device)
{
	visorbus_exit();
	/* stop the poller before destroying what the worker touches */
	cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
	sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
			    visorchipset_dev_groups);

	visorchannel_destroy(chipset_dev->controlvm_channel);
	kfree(chipset_dev);

	return 0;
}
1825
/* ACPI IDs this driver binds to; the table ends with an empty sentinel. */
static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};

/* ACPI driver glue: .add/.remove map to visorchipset_init()/_exit(). */
static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};

/* Export the ID table so udev/modprobe can autoload on ACPI device match. */
MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
1843
1844 static __init int visorutil_spar_detect(void)
1845 {
1846         unsigned int eax, ebx, ecx, edx;
1847
1848         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1849                 /* check the ID */
1850                 cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx);
1851                 return  (ebx == UNISYS_VISOR_ID_EBX) &&
1852                         (ecx == UNISYS_VISOR_ID_ECX) &&
1853                         (edx == UNISYS_VISOR_ID_EDX);
1854         } else {
1855                 return 0;
1856         }
1857 }
1858
1859 static int init_unisys(void)
1860 {
1861         int result;
1862
1863         if (!visorutil_spar_detect())
1864                 return -ENODEV;
1865
1866         result = acpi_bus_register_driver(&unisys_acpi_driver);
1867         if (result)
1868                 return -ENODEV;
1869
1870         pr_info("Unisys Visorchipset Driver Loaded.\n");
1871         return 0;
1872 };
1873
/*
 * exit_unisys() - module exit point; unregistering the ACPI driver triggers
 *                 visorchipset_exit() for the bound device
 */
static void exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}
1878
1879 module_init(init_unisys);
1880 module_exit(exit_unisys);
1881
1882 MODULE_AUTHOR("Unisys");
1883 MODULE_LICENSE("GPL");
1884 MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");