staging: unisys: visorbus: get rid of POSTCODEs in save_crash_msg
[linux-2.6-block.git] / drivers / staging / unisys / visorbus / visorchipset.c
1 /* visorchipset_main.c
2  *
3  * Copyright (C) 2010 - 2015 UNISYS CORPORATION
4  * All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13  * NON INFRINGEMENT.  See the GNU General Public License for more
14  * details.
15  */
16
17 #include <linux/acpi.h>
18 #include <linux/ctype.h>
19 #include <linux/fs.h>
20 #include <linux/mm.h>
21 #include <linux/nls.h>
22 #include <linux/netdevice.h>
23 #include <linux/uuid.h>
24 #include <linux/crash_dump.h>
25
26 #include "visorbus.h"
27 #include "visorbus_private.h"
28 #include "vmcallinterface.h"
29
30 #define CURRENT_FILE_PC VISOR_BUS_PC_visorchipset_c
31
32 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
33 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
34
35 #define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
36
37 #define UNISYS_SPAR_LEAF_ID 0x40000000
38
39 /* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
40 #define UNISYS_SPAR_ID_EBX 0x73696e55
41 #define UNISYS_SPAR_ID_ECX 0x70537379
42 #define UNISYS_SPAR_ID_EDX 0x34367261
43
44 /*
45  * When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
46  * we switch to slow polling mode. As soon as we get a controlvm
47  * message, we switch back to fast polling mode.
48  */
49 #define MIN_IDLE_SECONDS 10
50
51 struct parser_context {
52         unsigned long allocbytes;
53         unsigned long param_bytes;
54         u8 *curr;
55         unsigned long bytes_remaining;
56         bool byte_stream;
57         char data[0];
58 };
59
/*
 * Driver-wide state for visorchipset; a single instance is referenced
 * through the chipset_dev global below.
 */
struct visorchipset_device {
	/* ACPI device this driver was probed against */
	struct acpi_device *acpi_device;
	/* current poll period, in jiffies (see POLLJIFFIES_* above) */
	unsigned long poll_jiffies;
	/* when we got our last controlvm message */
	unsigned long most_recent_message_jiffies;
	/* deferred work that polls the controlvm channel */
	struct delayed_work periodic_controlvm_work;
	/* channel used to exchange controlvm messages with Command */
	struct visorchannel *controlvm_channel;
	/* bytes held in live parser contexts (released in parser_done()) */
	unsigned long controlvm_payload_bytes_buffered;
	/*
	 * The following variables are used to handle the scenario where we are
	 * unable to offload the payload from a controlvm message due to memory
	 * requirements. In this scenario, we simply stash the controlvm
	 * message, then attempt to process it again the next time
	 * controlvm_periodic_work() runs.
	 */
	struct controlvm_message controlvm_pending_msg;
	bool controlvm_pending_msg_valid;
};
78
/* The single driver instance; presumably allocated at probe -- not visible here */
static struct visorchipset_device *chipset_dev;
80
/* One queued parahotplug request awaiting completion. */
struct parahotplug_request {
	struct list_head list;		/* link in the pending-request list */
	int id;				/* id used to match the eventual reply */
	unsigned long expiration;	/* presumably a jiffies deadline -- confirm against producer */
	struct controlvm_message msg;	/* the originating controlvm message */
};
87
/* sysfs show/store implementations for the controlvm channel attributes */
89 static ssize_t toolaction_show(struct device *dev,
90                                struct device_attribute *attr,
91                                char *buf)
92 {
93         u8 tool_action = 0;
94         int err;
95
96         err = visorchannel_read(chipset_dev->controlvm_channel,
97                                 offsetof(struct spar_controlvm_channel_protocol,
98                                          tool_action),
99                                 &tool_action, sizeof(u8));
100         if (err)
101                 return err;
102
103         return sprintf(buf, "%u\n", tool_action);
104 }
105
106 static ssize_t toolaction_store(struct device *dev,
107                                 struct device_attribute *attr,
108                                 const char *buf, size_t count)
109 {
110         u8 tool_action;
111         int err;
112
113         if (kstrtou8(buf, 10, &tool_action))
114                 return -EINVAL;
115
116         err = visorchannel_write
117                 (chipset_dev->controlvm_channel,
118                  offsetof(struct spar_controlvm_channel_protocol,
119                           tool_action),
120                  &tool_action, sizeof(u8));
121
122         if (err)
123                 return err;
124         return count;
125 }
126 static DEVICE_ATTR_RW(toolaction);
127
128 static ssize_t boottotool_show(struct device *dev,
129                                struct device_attribute *attr,
130                                char *buf)
131 {
132         struct efi_spar_indication efi_spar_indication;
133         int err;
134
135         err = visorchannel_read(chipset_dev->controlvm_channel,
136                                 offsetof(struct spar_controlvm_channel_protocol,
137                                          efi_spar_ind),
138                                 &efi_spar_indication,
139                                 sizeof(struct efi_spar_indication));
140
141         if (err)
142                 return err;
143         return sprintf(buf, "%u\n", efi_spar_indication.boot_to_tool);
144 }
145
146 static ssize_t boottotool_store(struct device *dev,
147                                 struct device_attribute *attr,
148                                 const char *buf, size_t count)
149 {
150         int val, err;
151         struct efi_spar_indication efi_spar_indication;
152
153         if (kstrtoint(buf, 10, &val))
154                 return -EINVAL;
155
156         efi_spar_indication.boot_to_tool = val;
157         err = visorchannel_write
158                 (chipset_dev->controlvm_channel,
159                  offsetof(struct spar_controlvm_channel_protocol,
160                           efi_spar_ind), &(efi_spar_indication),
161                  sizeof(struct efi_spar_indication));
162
163         if (err)
164                 return err;
165         return count;
166 }
167 static DEVICE_ATTR_RW(boottotool);
168
169 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
170                           char *buf)
171 {
172         u32 error = 0;
173         int err;
174
175         err = visorchannel_read(chipset_dev->controlvm_channel,
176                                 offsetof(struct spar_controlvm_channel_protocol,
177                                          installation_error),
178                                 &error, sizeof(u32));
179         if (err)
180                 return err;
181         return sprintf(buf, "%i\n", error);
182 }
183
184 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
185                            const char *buf, size_t count)
186 {
187         u32 error;
188         int err;
189
190         if (kstrtou32(buf, 10, &error))
191                 return -EINVAL;
192
193         err = visorchannel_write
194                 (chipset_dev->controlvm_channel,
195                  offsetof(struct spar_controlvm_channel_protocol,
196                           installation_error),
197                  &error, sizeof(u32));
198         if (err)
199                 return err;
200         return count;
201 }
202 static DEVICE_ATTR_RW(error);
203
204 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
205                            char *buf)
206 {
207         u32 text_id = 0;
208         int err;
209
210         err = visorchannel_read
211                         (chipset_dev->controlvm_channel,
212                          offsetof(struct spar_controlvm_channel_protocol,
213                                   installation_text_id),
214                          &text_id, sizeof(u32));
215         if (err)
216                 return err;
217
218         return sprintf(buf, "%i\n", text_id);
219 }
220
221 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
222                             const char *buf, size_t count)
223 {
224         u32 text_id;
225         int err;
226
227         if (kstrtou32(buf, 10, &text_id))
228                 return -EINVAL;
229
230         err = visorchannel_write
231                 (chipset_dev->controlvm_channel,
232                  offsetof(struct spar_controlvm_channel_protocol,
233                           installation_text_id),
234                  &text_id, sizeof(u32));
235         if (err)
236                 return err;
237         return count;
238 }
239 static DEVICE_ATTR_RW(textid);
240
241 static ssize_t remaining_steps_show(struct device *dev,
242                                     struct device_attribute *attr, char *buf)
243 {
244         u16 remaining_steps = 0;
245         int err;
246
247         err = visorchannel_read(chipset_dev->controlvm_channel,
248                                 offsetof(struct spar_controlvm_channel_protocol,
249                                          installation_remaining_steps),
250                                 &remaining_steps, sizeof(u16));
251         if (err)
252                 return err;
253
254         return sprintf(buf, "%hu\n", remaining_steps);
255 }
256
257 static ssize_t remaining_steps_store(struct device *dev,
258                                      struct device_attribute *attr,
259                                      const char *buf, size_t count)
260 {
261         u16 remaining_steps;
262         int err;
263
264         if (kstrtou16(buf, 10, &remaining_steps))
265                 return -EINVAL;
266
267         err = visorchannel_write
268                 (chipset_dev->controlvm_channel,
269                  offsetof(struct spar_controlvm_channel_protocol,
270                           installation_remaining_steps),
271                  &remaining_steps, sizeof(u16));
272         if (err)
273                 return err;
274         return count;
275 }
276 static DEVICE_ATTR_RW(remaining_steps);
277
278 static uuid_le
279 parser_id_get(struct parser_context *ctx)
280 {
281         struct spar_controlvm_parameters_header *phdr = NULL;
282
283         phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
284         return phdr->id;
285 }
286
/*
 * parser_done() - free a parser context and return its payload bytes to
 * the driver-wide buffered-bytes accounting.
 */
static void parser_done(struct parser_context *ctx)
{
	chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
	kfree(ctx);
}
292
293 static void *
294 parser_string_get(struct parser_context *ctx)
295 {
296         u8 *pscan;
297         unsigned long nscan;
298         int value_length = -1;
299         void *value = NULL;
300         int i;
301
302         pscan = ctx->curr;
303         nscan = ctx->bytes_remaining;
304         if (nscan == 0)
305                 return NULL;
306         if (!pscan)
307                 return NULL;
308         for (i = 0, value_length = -1; i < nscan; i++)
309                 if (pscan[i] == '\0') {
310                         value_length = i;
311                         break;
312                 }
313         if (value_length < 0)   /* '\0' was not included in the length */
314                 value_length = nscan;
315         value = kmalloc(value_length + 1, GFP_KERNEL);
316         if (!value)
317                 return NULL;
318         if (value_length > 0)
319                 memcpy(value, pscan, value_length);
320         ((u8 *)(value))[value_length] = '\0';
321         return value;
322 }
323
/*
 * parser_name_get() - return a NUL-terminated copy of the name string
 * described by the parameter payload header.
 *
 * Positions the parser cursor at the name region, then delegates to
 * parser_string_get(); the caller owns (and must kfree) the result.
 *
 * Returns NULL when the name region does not fit inside the payload or
 * the copy cannot be allocated.
 *
 * NOTE(review): if name_offset/name_length are 32-bit fields, their sum
 * below could wrap on a hostile payload before the bounds check -- confirm
 * the field types in spar_controlvm_parameters_header.
 */
static void *
parser_name_get(struct parser_context *ctx)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);

	if (phdr->name_offset + phdr->name_length > ctx->param_bytes)
		return NULL;

	ctx->curr = ctx->data + phdr->name_offset;
	ctx->bytes_remaining = phdr->name_length;
	return parser_string_get(ctx);
}
338
/* Bus/device number pair used as the match key for bus_find_device(). */
struct visor_busdev {
	u32 bus_no;
	u32 dev_no;
};
343
344 static int match_visorbus_dev_by_id(struct device *dev, void *data)
345 {
346         struct visor_device *vdev = to_visor_device(dev);
347         struct visor_busdev *id = data;
348         u32 bus_no = id->bus_no;
349         u32 dev_no = id->dev_no;
350
351         if ((vdev->chipset_bus_no == bus_no) &&
352             (vdev->chipset_dev_no == dev_no))
353                 return 1;
354
355         return 0;
356 }
357
358 struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
359                                                struct visor_device *from)
360 {
361         struct device *dev;
362         struct device *dev_start = NULL;
363         struct visor_device *vdev = NULL;
364         struct visor_busdev id = {
365                         .bus_no = bus_no,
366                         .dev_no = dev_no
367                 };
368
369         if (from)
370                 dev_start = &from->device;
371         dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
372                               match_visorbus_dev_by_id);
373         if (dev)
374                 vdev = to_visor_device(dev);
375         return vdev;
376 }
377
378 static void
379 controlvm_init_response(struct controlvm_message *msg,
380                         struct controlvm_message_header *msg_hdr, int response)
381 {
382         memset(msg, 0, sizeof(struct controlvm_message));
383         memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
384         msg->hdr.payload_bytes = 0;
385         msg->hdr.payload_vm_offset = 0;
386         msg->hdr.payload_max_bytes = 0;
387         if (response < 0) {
388                 msg->hdr.flags.failed = 1;
389                 msg->hdr.completion_status = (u32)(-response);
390         }
391 }
392
393 static int
394 controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
395                                int response,
396                                enum ultra_chipset_feature features)
397 {
398         struct controlvm_message outmsg;
399
400         controlvm_init_response(&outmsg, msg_hdr, response);
401         outmsg.cmd.init_chipset.features = features;
402         return visorchannel_signalinsert(chipset_dev->controlvm_channel,
403                                          CONTROLVM_QUEUE_REQUEST, &outmsg);
404 }
405
/*
 * chipset_init() - handle the CONTROLVM chipset-init message.
 *
 * Guards against running twice via the static chipset_inited flag and
 * negotiates feature bits with Command.  When the sender expects a
 * response, the result (rc) and negotiated features are sent back.
 *
 * Return: 0 on success; a negative errno on repeat initialization or a
 * response-insert failure.
 */
static int
chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum ultra_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;
	int res = 0;

	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ALREADY_DONE;
		res = -EIO;
		goto out_respond;
	}
	chipset_inited = 1;

	/*
	 * Set features to indicate we support parahotplug (if Command
	 * also supports it).
	 */
	features = inmsg->cmd.init_chipset.features &
		   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

	/*
	 * Set the "reply" bit so Command knows this is a
	 * features-aware driver.
	 */
	features |= ULTRA_CHIPSET_FEATURE_REPLY;

out_respond:
	/*
	 * NOTE(review): on the already-inited path, res (-EIO) is replaced
	 * by the responder's return value whenever a response is expected
	 * -- confirm this masking is intentional.
	 */
	if (inmsg->hdr.flags.response_expected)
		res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);

	return res;
}
440
441 static int
442 controlvm_respond(struct controlvm_message_header *msg_hdr, int response,
443                   struct spar_segment_state *state)
444 {
445         struct controlvm_message outmsg;
446
447         controlvm_init_response(&outmsg, msg_hdr, response);
448         if (outmsg.hdr.flags.test_message == 1)
449                 return -EINVAL;
450
451         if (state) {
452                 outmsg.cmd.device_change_state.state = *state;
453                 outmsg.cmd.device_change_state.flags.phys_device = 1;
454         }
455
456         return visorchannel_signalinsert(chipset_dev->controlvm_channel,
457                                          CONTROLVM_QUEUE_REQUEST, &outmsg);
458 }
459
/* Which saved-crash-message slot save_crash_message() writes to. */
enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};
464
465 static int
466 save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
467 {
468         u32 local_crash_msg_offset;
469         u16 local_crash_msg_count;
470         int err;
471
472         err = visorchannel_read(chipset_dev->controlvm_channel,
473                                 offsetof(struct spar_controlvm_channel_protocol,
474                                          saved_crash_message_count),
475                                 &local_crash_msg_count, sizeof(u16));
476         if (err) {
477                 dev_err(&chipset_dev->acpi_device->dev,
478                         "failed to read message count\n");
479                 return err;
480         }
481
482         if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
483                 dev_err(&chipset_dev->acpi_device->dev,
484                         "invalid number of messages\n");
485                 return -EIO;
486         }
487
488         err = visorchannel_read(chipset_dev->controlvm_channel,
489                                 offsetof(struct spar_controlvm_channel_protocol,
490                                          saved_crash_message_offset),
491                                 &local_crash_msg_offset, sizeof(u32));
492         if (err) {
493                 dev_err(&chipset_dev->acpi_device->dev,
494                         "failed to read offset\n");
495                 return err;
496         }
497
498         switch (typ) {
499         case CRASH_DEV:
500                 local_crash_msg_offset += sizeof(struct controlvm_message);
501                 err = visorchannel_write(chipset_dev->controlvm_channel,
502                                          local_crash_msg_offset,
503                                          msg,
504                                          sizeof(struct controlvm_message));
505                 if (err) {
506                         dev_err(&chipset_dev->acpi_device->dev,
507                                 "failed to write dev msg\n");
508                         return err;
509                 }
510                 break;
511         case CRASH_BUS:
512                 err = visorchannel_write(chipset_dev->controlvm_channel,
513                                          local_crash_msg_offset,
514                                          msg,
515                                          sizeof(struct controlvm_message));
516                 if (err) {
517                         dev_err(&chipset_dev->acpi_device->dev,
518                                 "failed to write bus msg\n");
519                         return err;
520                 }
521                 break;
522         default:
523                 dev_err(&chipset_dev->acpi_device->dev,
524                         "Invalid crash_obj_type\n");
525                 break;
526         }
527         return 0;
528 }
529
530 static int
531 controlvm_responder(enum controlvm_id cmd_id,
532                     struct controlvm_message_header *pending_msg_hdr,
533                     int response)
534 {
535         if (!pending_msg_hdr)
536                 return -EIO;
537
538         if (pending_msg_hdr->id != (u32)cmd_id)
539                 return -EINVAL;
540
541         return controlvm_respond(pending_msg_hdr, response, NULL);
542 }
543
544 static int
545 device_changestate_responder(enum controlvm_id cmd_id,
546                              struct visor_device *p, int response,
547                              struct spar_segment_state response_state)
548 {
549         struct controlvm_message outmsg;
550         u32 bus_no = p->chipset_bus_no;
551         u32 dev_no = p->chipset_dev_no;
552
553         if (!p->pending_msg_hdr)
554                 return -EIO;
555         if (p->pending_msg_hdr->id != cmd_id)
556                 return -EINVAL;
557
558         controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
559
560         outmsg.cmd.device_change_state.bus_no = bus_no;
561         outmsg.cmd.device_change_state.dev_no = dev_no;
562         outmsg.cmd.device_change_state.state = response_state;
563
564         return visorchannel_signalinsert(chipset_dev->controlvm_channel,
565                                          CONTROLVM_QUEUE_REQUEST, &outmsg);
566 }
567
/*
 * bus_create() - handle a controlvm BUS_CREATE request.
 *
 * Allocates a visor_device for the new bus, saves the request as the
 * crash-bus message when the bus belongs to the s-Par I/O VM, stashes the
 * request header when a response is expected, creates the bus channel,
 * and hands off to chipset_bus_create() (which sends the response on
 * success).  On failure the partially built state is unwound in reverse
 * order via the goto chain and the sender is answered with the error.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr = NULL;
	u32 bus_no = cmd->create_bus.bus_no;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int err;

	/* refuse to create the same bus twice */
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
			       DIAG_SEVERITY_ERR);
		err = -EEXIST;
		goto err_respond;
	}

	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
			       DIAG_SEVERITY_ERR);
		err = -ENOMEM;
		goto err_respond;
	}

	INIT_LIST_HEAD(&bus_info->list_all);
	bus_info->chipset_bus_no = bus_no;
	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;

	POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT);

	/* the s-Par I/O VM's bus message is preserved for crash recovery */
	if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) {
		err = save_crash_message(inmsg, CRASH_BUS);
		if (err)
			goto err_free_bus_info;
	}

	/* stash the header so the response can be sent asynchronously */
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr),
				   GFP_KERNEL);
		if (!pmsg_hdr) {
			POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
				       bus_info->chipset_bus_no,
				       DIAG_SEVERITY_ERR);
			err = -ENOMEM;
			goto err_free_bus_info;
		}

		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}

	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
					   cmd->create_bus.channel_bytes,
					   GFP_KERNEL,
					   cmd->create_bus.bus_data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
			       DIAG_SEVERITY_ERR);
		err = -ENOMEM;
		goto err_free_pending_msg;
	}
	bus_info->visorchannel = visorchannel;

	/* Response will be handled by chipset_bus_create */
	err = chipset_bus_create(bus_info);
	/* If error chipset_bus_create didn't respond, need to respond here */
	if (err)
		goto err_destroy_channel;

	POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
	return 0;

err_destroy_channel:
	visorchannel_destroy(visorchannel);

err_free_pending_msg:
	kfree(bus_info->pending_msg_hdr);

err_free_bus_info:
	kfree(bus_info);

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
658
659 static int
660 bus_destroy(struct controlvm_message *inmsg)
661 {
662         struct controlvm_message_packet *cmd = &inmsg->cmd;
663         struct controlvm_message_header *pmsg_hdr = NULL;
664         u32 bus_no = cmd->destroy_bus.bus_no;
665         struct visor_device *bus_info;
666         int err;
667
668         bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
669         if (!bus_info) {
670                 err = -ENODEV;
671                 goto err_respond;
672         }
673         if (bus_info->state.created == 0) {
674                 err = -ENOENT;
675                 goto err_respond;
676         }
677         if (bus_info->pending_msg_hdr) {
678                 /* only non-NULL if dev is still waiting on a response */
679                 err = -EEXIST;
680                 goto err_respond;
681         }
682         if (inmsg->hdr.flags.response_expected == 1) {
683                 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
684                 if (!pmsg_hdr) {
685                         err = -ENOMEM;
686                         goto err_respond;
687                 }
688
689                 memcpy(pmsg_hdr, &inmsg->hdr,
690                        sizeof(struct controlvm_message_header));
691                 bus_info->pending_msg_hdr = pmsg_hdr;
692         }
693
694         /* Response will be handled by chipset_bus_destroy */
695         chipset_bus_destroy(bus_info);
696         return 0;
697
698 err_respond:
699         if (inmsg->hdr.flags.response_expected == 1)
700                 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
701         return err;
702 }
703
704 static int
705 bus_configure(struct controlvm_message *inmsg,
706               struct parser_context *parser_ctx)
707 {
708         struct controlvm_message_packet *cmd = &inmsg->cmd;
709         u32 bus_no;
710         struct visor_device *bus_info;
711         int err = 0;
712
713         bus_no = cmd->configure_bus.bus_no;
714         POSTCODE_LINUX(BUS_CONFIGURE_ENTRY_PC, 0, bus_no,
715                        DIAG_SEVERITY_PRINT);
716
717         bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
718         if (!bus_info) {
719                 POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
720                                DIAG_SEVERITY_ERR);
721                 err = -EINVAL;
722                 goto err_respond;
723         } else if (bus_info->state.created == 0) {
724                 POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
725                                DIAG_SEVERITY_ERR);
726                 err = -EINVAL;
727                 goto err_respond;
728         } else if (bus_info->pending_msg_hdr) {
729                 POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
730                                DIAG_SEVERITY_ERR);
731                 err = -EIO;
732                 goto err_respond;
733         }
734
735         err = visorchannel_set_clientpartition
736                 (bus_info->visorchannel,
737                  cmd->configure_bus.guest_handle);
738         if (err)
739                 goto err_respond;
740
741         if (parser_ctx) {
742                 bus_info->partition_uuid = parser_id_get(parser_ctx);
743                 bus_info->name = parser_name_get(parser_ctx);
744         }
745
746         POSTCODE_LINUX(BUS_CONFIGURE_EXIT_PC, 0, bus_no,
747                        DIAG_SEVERITY_PRINT);
748
749         if (inmsg->hdr.flags.response_expected == 1)
750                 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
751         return 0;
752
753 err_respond:
754         if (inmsg->hdr.flags.response_expected == 1)
755                 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
756         return err;
757 }
758
/*
 * my_device_create() - handle a controlvm DEVICE_CREATE request.
 *
 * Validates the owning bus, allocates a visor_device for the new device,
 * creates its (locked) channel, preserves vHBA create messages for crash
 * recovery, stashes the request header when a response is expected, and
 * hands off to chipset_device_create() (which sends the response on
 * success).  On failure the partially built state is unwound via the goto
 * chain and the sender is answered with the error.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr = NULL;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visor_device *dev_info = NULL;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int err;

	/* the owning bus must already exist and have been created */
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
			       DIAG_SEVERITY_ERR);
		err = -ENODEV;
		goto err_respond;
	}

	if (bus_info->state.created == 0) {
		POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
			       DIAG_SEVERITY_ERR);
		err = -EINVAL;
		goto err_respond;
	}

	/* refuse to create the same device twice */
	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
			       DIAG_SEVERITY_ERR);
		err = -EEXIST;
		goto err_respond;
	}

	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
			       DIAG_SEVERITY_ERR);
		err = -ENOMEM;
		goto err_respond;
	}

	dev_info->chipset_bus_no = bus_no;
	dev_info->chipset_dev_no = dev_no;
	dev_info->inst = cmd->create_device.dev_inst_uuid;

	/* not sure where the best place to set the 'parent' */
	dev_info->device.parent = &bus_info->device;

	POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
		       DIAG_SEVERITY_PRINT);

	visorchannel =
	       visorchannel_create_with_lock(cmd->create_device.channel_addr,
					     cmd->create_device.channel_bytes,
					     GFP_KERNEL,
					     cmd->create_device.data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
			       DIAG_SEVERITY_ERR);
		err = -ENOMEM;
		goto err_free_dev_info;
	}
	dev_info->visorchannel = visorchannel;
	dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
	/* vHBA create messages are preserved for crash recovery */
	if (uuid_le_cmp(cmd->create_device.data_type_uuid,
			spar_vhba_channel_protocol_uuid) == 0) {
		err = save_crash_message(inmsg, CRASH_DEV);
		if (err)
			goto err_destroy_visorchannel;
	}

	/* stash the header so the response can be sent asynchronously */
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_destroy_visorchannel;
		}

		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}
	/* Chipset_device_create will send response */
	err = chipset_device_create(dev_info);
	if (err)
		goto err_destroy_visorchannel;

	POSTCODE_LINUX(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
		       DIAG_SEVERITY_PRINT);
	return 0;

err_destroy_visorchannel:
	visorchannel_destroy(visorchannel);

err_free_dev_info:
	kfree(dev_info);

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
864
/*
 * my_device_changestate() - handle a CONTROLVM_DEVICE_CHANGESTATE message
 * @inmsg: the request; cmd.device_change_state identifies the device and
 *         carries the requested segment state
 *
 * Transitions an existing visor device to running (resume) or standby
 * (pause).  The actual response to Command is sent asynchronously from
 * chipset_device_resume()/chipset_device_pause() via the saved
 * pending_msg_hdr; on early failure we respond directly here.
 *
 * Return: 0 on success, negative errno on failure
 */
static int
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr = NULL;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visor_device *dev_info;
	int err = 0;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info) {
		POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
			       DIAG_SEVERITY_ERR);
		err = -ENODEV;
		goto err_respond;
	}
	if (dev_info->state.created == 0) {
		/* device must have been created before it can change state */
		POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
			       DIAG_SEVERITY_ERR);
		err = -EINVAL;
		goto err_respond;
	}
	if (dev_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		err = -EIO;
		goto err_respond;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		/*
		 * Save a copy of the request header so the deferred
		 * pause/resume completion can respond to Command later.
		 */
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_respond;
		}

		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}

	if (state.alive == segment_state_running.alive &&
	    state.operating == segment_state_running.operating)
		/* Response will be sent from chipset_device_resume */
		err = chipset_device_resume(dev_info);
	/* ServerNotReady / ServerLost / SegmentStateStandby */
	else if (state.alive == segment_state_standby.alive &&
		 state.operating == segment_state_standby.operating)
		/*
		 * technically this is standby case where server is lost.
		 * Response will be sent from chipset_device_pause.
		 */
		err = chipset_device_pause(dev_info);
	/*
	 * NOTE(review): a state matching neither branch falls through with
	 * err == 0, so no response is ever sent even though pending_msg_hdr
	 * was saved -- confirm whether unrecognized states should fail with
	 * -EINVAL instead.
	 */
	if (err)
		/*
		 * NOTE(review): on this path dev_info->pending_msg_hdr stays
		 * set while we also respond below -- verify it should be
		 * freed and cleared here.
		 */
		goto err_respond;

	return 0;

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
928
929 static int
930 my_device_destroy(struct controlvm_message *inmsg)
931 {
932         struct controlvm_message_packet *cmd = &inmsg->cmd;
933         struct controlvm_message_header *pmsg_hdr = NULL;
934         u32 bus_no = cmd->destroy_device.bus_no;
935         u32 dev_no = cmd->destroy_device.dev_no;
936         struct visor_device *dev_info;
937         int err;
938
939         dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
940         if (!dev_info) {
941                 err = -ENODEV;
942                 goto err_respond;
943         }
944         if (dev_info->state.created == 0) {
945                 err = -EINVAL;
946                 goto err_respond;
947         }
948
949         if (dev_info->pending_msg_hdr) {
950                 /* only non-NULL if dev is still waiting on a response */
951                 err = -EIO;
952                 goto err_respond;
953         }
954         if (inmsg->hdr.flags.response_expected == 1) {
955                 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
956                 if (!pmsg_hdr) {
957                         err = -ENOMEM;
958                         goto err_respond;
959                 }
960
961                 memcpy(pmsg_hdr, &inmsg->hdr,
962                        sizeof(struct controlvm_message_header));
963                 dev_info->pending_msg_hdr = pmsg_hdr;
964         }
965
966         chipset_device_destroy(dev_info);
967         return 0;
968
969 err_respond:
970         if (inmsg->hdr.flags.response_expected == 1)
971                 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
972         return err;
973 }
974
975 /*
976  * The general parahotplug flow works as follows. The visorchipset receives
977  * a DEVICE_CHANGESTATE message from Command specifying a physical device
978  * to enable or disable. The CONTROLVM message handler calls
979  * parahotplug_process_message, which then adds the message to a global list
980  * and kicks off a udev event which causes a user level script to enable or
981  * disable the specified device. The udev script then writes to
982  * /sys/devices/platform/visorchipset/parahotplug, which causes the
983  * parahotplug store functions to get called, at which point the
984  * appropriate CONTROLVM message is retrieved from the list and responded
985  * to.
986  */
987
988 #define PARAHOTPLUG_TIMEOUT_MS 2000
989
990 /*
991  * parahotplug_next_id() - generate unique int to match an outstanding
992  *                         CONTROLVM message with a udev script /sys
993  *                         response
994  *
995  * Return: a unique integer value
996  */
997 static int
998 parahotplug_next_id(void)
999 {
1000         static atomic_t id = ATOMIC_INIT(0);
1001
1002         return atomic_inc_return(&id);
1003 }
1004
1005 /*
1006  * parahotplug_next_expiration() - returns the time (in jiffies) when a
1007  *                                 CONTROLVM message on the list should expire
1008  *                                 -- PARAHOTPLUG_TIMEOUT_MS in the future
1009  *
1010  * Return: expected expiration time (in jiffies)
1011  */
1012 static unsigned long
1013 parahotplug_next_expiration(void)
1014 {
1015         return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1016 }
1017
1018 /*
1019  * parahotplug_request_create() - create a parahotplug_request, which is
1020  *                                basically a wrapper for a CONTROLVM_MESSAGE
1021  *                                that we can stick on a list
1022  * @msg: the message to insert in the request
1023  *
1024  * Return: the request containing the provided message
1025  */
1026 static struct parahotplug_request *
1027 parahotplug_request_create(struct controlvm_message *msg)
1028 {
1029         struct parahotplug_request *req;
1030
1031         req = kmalloc(sizeof(*req), GFP_KERNEL);
1032         if (!req)
1033                 return NULL;
1034
1035         req->id = parahotplug_next_id();
1036         req->expiration = parahotplug_next_expiration();
1037         req->msg = *msg;
1038
1039         return req;
1040 }
1041
/*
 * parahotplug_request_destroy() - release a parahotplug_request
 * @req: the request to free (must already be off the request list)
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}
1051
1052 static LIST_HEAD(parahotplug_request_list);
1053 static DEFINE_SPINLOCK(parahotplug_request_list_lock);  /* lock for above */
1054
1055 /*
1056  * parahotplug_request_complete() - mark request as complete
1057  * @id:     the id of the request
1058  * @active: indicates whether the request is assigned to active partition
1059  *
1060  * Called from the /sys handler, which means the user script has
1061  * finished the enable/disable. Find the matching identifier, and
1062  * respond to the CONTROLVM message with success.
1063  *
1064  * Return: 0 on success or -EINVAL on failure
1065  */
1066 static int
1067 parahotplug_request_complete(int id, u16 active)
1068 {
1069         struct list_head *pos;
1070         struct list_head *tmp;
1071
1072         spin_lock(&parahotplug_request_list_lock);
1073
1074         /* Look for a request matching "id". */
1075         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1076                 struct parahotplug_request *req =
1077                     list_entry(pos, struct parahotplug_request, list);
1078                 if (req->id == id) {
1079                         /*
1080                          * Found a match. Remove it from the list and
1081                          * respond.
1082                          */
1083                         list_del(pos);
1084                         spin_unlock(&parahotplug_request_list_lock);
1085                         req->msg.cmd.device_change_state.state.active = active;
1086                         if (req->msg.hdr.flags.response_expected)
1087                                 controlvm_respond(
1088                                        &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1089                                        &req->msg.cmd.device_change_state.state);
1090                         parahotplug_request_destroy(req);
1091                         return 0;
1092                 }
1093         }
1094
1095         spin_unlock(&parahotplug_request_list_lock);
1096         return -EINVAL;
1097 }
1098
1099 /*
1100  * devicedisabled_store() - disables the hotplug device
1101  * @dev:   sysfs interface variable not utilized in this function
1102  * @attr:  sysfs interface variable not utilized in this function
1103  * @buf:   buffer containing the device id
1104  * @count: the size of the buffer
1105  *
1106  * The parahotplug/devicedisabled interface gets called by our support script
1107  * when an SR-IOV device has been shut down. The ID is passed to the script
1108  * and then passed back when the device has been removed.
1109  *
1110  * Return: the size of the buffer for success or negative for error
1111  */
1112 static ssize_t devicedisabled_store(struct device *dev,
1113                                     struct device_attribute *attr,
1114                                     const char *buf, size_t count)
1115 {
1116         unsigned int id;
1117         int err;
1118
1119         if (kstrtouint(buf, 10, &id))
1120                 return -EINVAL;
1121
1122         err = parahotplug_request_complete(id, 0);
1123         if (err < 0)
1124                 return err;
1125         return count;
1126 }
1127 static DEVICE_ATTR_WO(devicedisabled);
1128
1129 /*
1130  * deviceenabled_store() - enables the hotplug device
1131  * @dev:   sysfs interface variable not utilized in this function
1132  * @attr:  sysfs interface variable not utilized in this function
1133  * @buf:   buffer containing the device id
1134  * @count: the size of the buffer
1135  *
1136  * The parahotplug/deviceenabled interface gets called by our support script
1137  * when an SR-IOV device has been recovered. The ID is passed to the script
1138  * and then passed back when the device has been brought back up.
1139  *
1140  * Return: the size of the buffer for success or negative for error
1141  */
1142 static ssize_t deviceenabled_store(struct device *dev,
1143                                    struct device_attribute *attr,
1144                                    const char *buf, size_t count)
1145 {
1146         unsigned int id;
1147
1148         if (kstrtouint(buf, 10, &id))
1149                 return -EINVAL;
1150
1151         parahotplug_request_complete(id, 1);
1152         return count;
1153 }
1154 static DEVICE_ATTR_WO(deviceenabled);
1155
1156 static struct attribute *visorchipset_install_attrs[] = {
1157         &dev_attr_toolaction.attr,
1158         &dev_attr_boottotool.attr,
1159         &dev_attr_error.attr,
1160         &dev_attr_textid.attr,
1161         &dev_attr_remaining_steps.attr,
1162         NULL
1163 };
1164
1165 static const struct attribute_group visorchipset_install_group = {
1166         .name = "install",
1167         .attrs = visorchipset_install_attrs
1168 };
1169
1170 static struct attribute *visorchipset_parahotplug_attrs[] = {
1171         &dev_attr_devicedisabled.attr,
1172         &dev_attr_deviceenabled.attr,
1173         NULL
1174 };
1175
1176 static struct attribute_group visorchipset_parahotplug_group = {
1177         .name = "parahotplug",
1178         .attrs = visorchipset_parahotplug_attrs
1179 };
1180
1181 static const struct attribute_group *visorchipset_dev_groups[] = {
1182         &visorchipset_install_group,
1183         &visorchipset_parahotplug_group,
1184         NULL
1185 };
1186
1187 /*
1188  * parahotplug_request_kickoff() - initiate parahotplug request
1189  * @req: the request to initiate
1190  *
1191  * Cause uevent to run the user level script to do the disable/enable specified
1192  * in the parahotplug_request.
1193  */
1194 static int
1195 parahotplug_request_kickoff(struct parahotplug_request *req)
1196 {
1197         struct controlvm_message_packet *cmd = &req->msg.cmd;
1198         char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1199             env_func[40];
1200         char *envp[] = {
1201                 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1202         };
1203
1204         sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1205         sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1206         sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1207                 cmd->device_change_state.state.active);
1208         sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1209                 cmd->device_change_state.bus_no);
1210         sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1211                 cmd->device_change_state.dev_no >> 3);
1212         sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1213                 cmd->device_change_state.dev_no & 0x7);
1214
1215         return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1216                                   KOBJ_CHANGE, envp);
1217 }
1218
1219 /*
1220  * parahotplug_process_message() - enables or disables a PCI device by kicking
1221  *                                 off a udev script
1222  * @inmsg: the message indicating whether to enable or disable
1223  */
1224 static int
1225 parahotplug_process_message(struct controlvm_message *inmsg)
1226 {
1227         struct parahotplug_request *req;
1228         int err;
1229
1230         req = parahotplug_request_create(inmsg);
1231
1232         if (!req)
1233                 return -ENOMEM;
1234
1235         /*
1236          * For enable messages, just respond with success right away, we don't
1237          * need to wait to see if the enable was successful.
1238          */
1239         if (inmsg->cmd.device_change_state.state.active) {
1240                 err = parahotplug_request_kickoff(req);
1241                 if (err)
1242                         goto err_respond;
1243                 controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS,
1244                                   &inmsg->cmd.device_change_state.state);
1245                 parahotplug_request_destroy(req);
1246                 return 0;
1247         }
1248
1249         /*
1250          * For disable messages, add the request to the
1251          * request list before kicking off the udev script. It
1252          * won't get responded to until the script has
1253          * indicated it's done.
1254          */
1255         spin_lock(&parahotplug_request_list_lock);
1256         list_add_tail(&req->list, &parahotplug_request_list);
1257         spin_unlock(&parahotplug_request_list_lock);
1258
1259         err = parahotplug_request_kickoff(req);
1260         if (err)
1261                 goto err_respond;
1262         return 0;
1263
1264 err_respond:
1265         controlvm_respond(&inmsg->hdr, err,
1266                           &inmsg->cmd.device_change_state.state);
1267         return err;
1268 }
1269
1270 /*
1271  * chipset_ready_uevent() - sends chipset_ready action
1272  *
1273  * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1274  *
1275  * Return: 0 on success, negative on failure
1276  */
1277 static int
1278 chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
1279 {
1280         int res;
1281
1282         res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
1283                              KOBJ_ONLINE);
1284
1285         if (msg_hdr->flags.response_expected)
1286                 controlvm_respond(msg_hdr, res, NULL);
1287
1288         return res;
1289 }
1290
1291 /*
1292  * chipset_selftest_uevent() - sends chipset_selftest action
1293  *
1294  * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1295  *
1296  * Return: 0 on success, negative on failure
1297  */
1298 static int
1299 chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
1300 {
1301         char env_selftest[20];
1302         char *envp[] = { env_selftest, NULL };
1303         int res;
1304
1305         sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1306         res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1307                                  KOBJ_CHANGE, envp);
1308
1309         if (msg_hdr->flags.response_expected)
1310                 controlvm_respond(msg_hdr, res, NULL);
1311
1312         return res;
1313 }
1314
1315 /*
1316  * chipset_notready_uevent() - sends chipset_notready action
1317  *
1318  * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1319  *
1320  * Return: 0 on success, negative on failure
1321  */
1322 static int
1323 chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
1324 {
1325         int res;
1326
1327         res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
1328                              KOBJ_OFFLINE);
1329         if (msg_hdr->flags.response_expected)
1330                 controlvm_respond(msg_hdr, res, NULL);
1331
1332         return res;
1333 }
1334
/*
 * unisys_vmcall() - issue a VMCALL to the s-Par hypervisor
 * @tuple: selects which vmcall service is requested (passed in EAX)
 * @param: 64-bit parameter, split across EBX (low 32 bits) and
 *         ECX (high 32 bits)
 *
 * Return: 0 on success; -EPERM when not running under a hypervisor;
 *         otherwise a negative errno translated from the VMCALL result
 */
static int unisys_vmcall(unsigned long tuple, unsigned long param)
{
	int result = 0;
	unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
	unsigned long reg_ebx;
	unsigned long reg_ecx;

	/* split the 64-bit parameter into the two 32-bit vmcall registers */
	reg_ebx = param & 0xFFFFFFFF;
	reg_ecx = param >> 32;

	/* CPUID leaf 1, ECX bit 31 set = running under a hypervisor */
	cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
	if (!(cpuid_ecx & 0x80000000))
		return -EPERM;

	/* bytes 0x0f 0x01 0xc1 encode the VMCALL instruction */
	__asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
		"a"(tuple), "b"(reg_ebx), "c"(reg_ecx));

	if (result)
		goto error;

	return 0;

error: /* Need to convert from VMCALL error codes to Linux */
	switch (result) {
	case VMCALL_RESULT_INVALID_PARAM:
		return -EINVAL;
	case VMCALL_RESULT_DATA_UNAVAILABLE:
		return -ENODEV;
	default:
		return -EFAULT;
	}
}
1367 static unsigned int
1368 issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
1369 {
1370         struct vmcall_io_controlvm_addr_params params;
1371         int err;
1372         u64 physaddr;
1373
1374         physaddr = virt_to_phys(&params);
1375         err = unisys_vmcall(VMCALL_CONTROLVM_ADDR, physaddr);
1376         if (err)
1377                 return err;
1378
1379         *control_addr = params.address;
1380         *control_bytes = params.channel_bytes;
1381
1382         return 0;
1383 }
1384
1385 static u64 controlvm_get_channel_address(void)
1386 {
1387         u64 addr = 0;
1388         u32 size = 0;
1389
1390         if (issue_vmcall_io_controlvm_addr(&addr, &size))
1391                 return 0;
1392
1393         return addr;
1394 }
1395
/*
 * setup_crash_devices_work_queue() - restore the saved storage bus and
 *                                    device from the controlvm channel
 * @work: work item this handler was scheduled from (not otherwise used)
 *
 * Reads the CONTROLVM_BUS_CREATE and CONTROLVM_DEVICE_CREATE messages that
 * were previously saved at saved_crash_message_offset in the controlvm
 * channel and replays them via bus_create()/my_device_create(), presumably
 * so the dump storage device is usable after a crash-kernel boot -- confirm
 * against the save_crash_message() path.  Any failure posts an error
 * POSTCODE and abandons the setup.
 */
static void
setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	POSTCODE_LINUX(CRASH_DEV_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);

	/* send init chipset msg */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	chipset_init(&msg);

	/* get saved message count */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
			       DIAG_SEVERITY_ERR);
		return;
	}

	/* exactly CONTROLVM_CRASHMSG_MAX messages must have been saved */
	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
			       local_crash_msg_count,
			       DIAG_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
			       DIAG_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage bus offset */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX(CRASH_DEV_RD_BUS_FAILURE_PC, 0, 0,
			       DIAG_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage device */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX(CRASH_DEV_RD_DEV_FAILURE_PC, 0, 0,
			       DIAG_SEVERITY_ERR);
		return;
	}

	/* reuse IOVM create bus message */
	if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
		bus_create(&local_crash_bus_msg);
	} else {
		POSTCODE_LINUX(CRASH_DEV_BUS_NULL_FAILURE_PC, 0, 0,
			       DIAG_SEVERITY_ERR);
		return;
	}

	/* reuse create device message for storage device */
	if (local_crash_dev_msg.cmd.create_device.channel_addr) {
		my_device_create(&local_crash_dev_msg);
	} else {
		POSTCODE_LINUX(CRASH_DEV_DEV_NULL_FAILURE_PC, 0, 0,
			       DIAG_SEVERITY_ERR);
		return;
	}
	POSTCODE_LINUX(CRASH_DEV_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
}
1481
1482 void
1483 bus_create_response(struct visor_device *bus_info, int response)
1484 {
1485         if (response >= 0)
1486                 bus_info->state.created = 1;
1487
1488         controlvm_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
1489                             response);
1490
1491         kfree(bus_info->pending_msg_hdr);
1492         bus_info->pending_msg_hdr = NULL;
1493 }
1494
1495 void
1496 bus_destroy_response(struct visor_device *bus_info, int response)
1497 {
1498         controlvm_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
1499                             response);
1500
1501         kfree(bus_info->pending_msg_hdr);
1502         bus_info->pending_msg_hdr = NULL;
1503 }
1504
1505 void
1506 device_create_response(struct visor_device *dev_info, int response)
1507 {
1508         if (response >= 0)
1509                 dev_info->state.created = 1;
1510
1511         controlvm_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
1512                             response);
1513
1514         kfree(dev_info->pending_msg_hdr);
1515         dev_info->pending_msg_hdr = NULL;
1516 }
1517
1518 void
1519 device_destroy_response(struct visor_device *dev_info, int response)
1520 {
1521         controlvm_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
1522                             response);
1523
1524         kfree(dev_info->pending_msg_hdr);
1525         dev_info->pending_msg_hdr = NULL;
1526 }
1527
1528 void
1529 device_pause_response(struct visor_device *dev_info,
1530                       int response)
1531 {
1532         device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
1533                                      dev_info, response,
1534                                      segment_state_standby);
1535
1536         kfree(dev_info->pending_msg_hdr);
1537         dev_info->pending_msg_hdr = NULL;
1538 }
1539
1540 void
1541 device_resume_response(struct visor_device *dev_info, int response)
1542 {
1543         device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
1544                                      dev_info, response,
1545                                      segment_state_running);
1546
1547         kfree(dev_info->pending_msg_hdr);
1548         dev_info->pending_msg_hdr = NULL;
1549 }
1550
1551 static struct parser_context *
1552 parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
1553 {
1554         int allocbytes = sizeof(struct parser_context) + bytes;
1555         struct parser_context *ctx;
1556
1557         *retry = false;
1558
1559         /*
1560          * alloc an 0 extra byte to ensure payload is
1561          * '\0'-terminated
1562          */
1563         allocbytes++;
1564         if ((chipset_dev->controlvm_payload_bytes_buffered + bytes)
1565             > MAX_CONTROLVM_PAYLOAD_BYTES) {
1566                 *retry = true;
1567                 return NULL;
1568         }
1569         ctx = kzalloc(allocbytes, GFP_KERNEL);
1570         if (!ctx) {
1571                 *retry = true;
1572                 return NULL;
1573         }
1574
1575         ctx->allocbytes = allocbytes;
1576         ctx->param_bytes = bytes;
1577         ctx->curr = NULL;
1578         ctx->bytes_remaining = 0;
1579         ctx->byte_stream = false;
1580         if (local) {
1581                 void *p;
1582
1583                 if (addr > virt_to_phys(high_memory - 1))
1584                         goto err_finish_ctx;
1585                 p = __va((unsigned long)(addr));
1586                 memcpy(ctx->data, p, bytes);
1587         } else {
1588                 void *mapping = memremap(addr, bytes, MEMREMAP_WB);
1589
1590                 if (!mapping)
1591                         goto err_finish_ctx;
1592                 memcpy(ctx->data, mapping, bytes);
1593                 memunmap(mapping);
1594         }
1595
1596         ctx->byte_stream = true;
1597         chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
1598
1599         return ctx;
1600
1601 err_finish_ctx:
1602         parser_done(ctx);
1603         return NULL;
1604 }
1605
1606 /*
1607  * handle_command() - process a controlvm message
1608  * @inmsg:        the message to process
1609  * @channel_addr: address of the controlvm channel
1610  *
1611  * Return:
1612  *      0       - Successfully processed the message
1613  *      -EAGAIN - ControlVM message was not processed and should be retried
1614  *                reading the next controlvm message; a scenario where this can
1615  *                occur is when we need to throttle the allocation of memory in
1616  *                which to copy out controlvm payload data.
1617  *      < 0     - error: ControlVM message was processed but an error occurred.
1618  */
1619 static int
1620 handle_command(struct controlvm_message inmsg, u64 channel_addr)
1621 {
1622         struct controlvm_message_packet *cmd = &inmsg.cmd;
1623         u64 parm_addr;
1624         u32 parm_bytes;
1625         struct parser_context *parser_ctx = NULL;
1626         bool local_addr;
1627         struct controlvm_message ackmsg;
1628         int err = 0;
1629
1630         /* create parsing context if necessary */
1631         local_addr = (inmsg.hdr.flags.test_message == 1);
1632         if (channel_addr == 0)
1633                 return -EINVAL;
1634
1635         parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1636         parm_bytes = inmsg.hdr.payload_bytes;
1637
1638         /*
1639          * Parameter and channel addresses within test messages actually lie
1640          * within our OS-controlled memory. We need to know that, because it
1641          * makes a difference in how we compute the virtual address.
1642          */
1643         if (parm_addr && parm_bytes) {
1644                 bool retry = false;
1645
1646                 parser_ctx =
1647                     parser_init_byte_stream(parm_addr, parm_bytes,
1648                                             local_addr, &retry);
1649                 if (!parser_ctx && retry)
1650                         return -EAGAIN;
1651         }
1652
1653         if (!local_addr) {
1654                 controlvm_init_response(&ackmsg, &inmsg.hdr,
1655                                         CONTROLVM_RESP_SUCCESS);
1656                 err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
1657                                                 CONTROLVM_QUEUE_ACK,
1658                                                 &ackmsg);
1659                 if (err)
1660                         return err;
1661         }
1662         switch (inmsg.hdr.id) {
1663         case CONTROLVM_CHIPSET_INIT:
1664                 err = chipset_init(&inmsg);
1665                 break;
1666         case CONTROLVM_BUS_CREATE:
1667                 err = bus_create(&inmsg);
1668                 break;
1669         case CONTROLVM_BUS_DESTROY:
1670                 err = bus_destroy(&inmsg);
1671                 break;
1672         case CONTROLVM_BUS_CONFIGURE:
1673                 err = bus_configure(&inmsg, parser_ctx);
1674                 break;
1675         case CONTROLVM_DEVICE_CREATE:
1676                 err = my_device_create(&inmsg);
1677                 break;
1678         case CONTROLVM_DEVICE_CHANGESTATE:
1679                 if (cmd->device_change_state.flags.phys_device) {
1680                         err = parahotplug_process_message(&inmsg);
1681                 } else {
1682                         /*
1683                          * save the hdr and cmd structures for later use
1684                          * when sending back the response to Command
1685                          */
1686                         err = my_device_changestate(&inmsg);
1687                         break;
1688                 }
1689                 break;
1690         case CONTROLVM_DEVICE_DESTROY:
1691                 err = my_device_destroy(&inmsg);
1692                 break;
1693         case CONTROLVM_DEVICE_CONFIGURE:
1694                 /* no op just send a respond that we passed */
1695                 if (inmsg.hdr.flags.response_expected)
1696                         controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS,
1697                                           NULL);
1698                 break;
1699         case CONTROLVM_CHIPSET_READY:
1700                 err = chipset_ready_uevent(&inmsg.hdr);
1701                 break;
1702         case CONTROLVM_CHIPSET_SELFTEST:
1703                 err = chipset_selftest_uevent(&inmsg.hdr);
1704                 break;
1705         case CONTROLVM_CHIPSET_STOP:
1706                 err = chipset_notready_uevent(&inmsg.hdr);
1707                 break;
1708         default:
1709                 err = -ENOMSG;
1710                 if (inmsg.hdr.flags.response_expected)
1711                         controlvm_respond(&inmsg.hdr,
1712                                           -CONTROLVM_RESP_ID_UNKNOWN, NULL);
1713                 break;
1714         }
1715
1716         if (parser_ctx) {
1717                 parser_done(parser_ctx);
1718                 parser_ctx = NULL;
1719         }
1720         return err;
1721 }
1722
1723 /*
1724  * read_controlvm_event() - retreives the next message from the
1725  *                          CONTROLVM_QUEUE_EVENT queue in the controlvm
1726  *                          channel
1727  * @msg: pointer to the retrieved message
1728  *
1729  * Return: 0 if valid message was retrieved or -error
1730  */
1731 static int
1732 read_controlvm_event(struct controlvm_message *msg)
1733 {
1734         int err;
1735
1736         err = visorchannel_signalremove(chipset_dev->controlvm_channel,
1737                                         CONTROLVM_QUEUE_EVENT, msg);
1738         if (err)
1739                 return err;
1740
1741         /* got a message */
1742         if (msg->hdr.flags.test_message == 1)
1743                 return -EINVAL;
1744
1745         return 0;
1746 }
1747
1748 /*
1749  * parahotplug_process_list() - remove any request from the list that's been on
1750  *                              there too long and respond with an error
1751  */
1752 static void
1753 parahotplug_process_list(void)
1754 {
1755         struct list_head *pos;
1756         struct list_head *tmp;
1757
1758         spin_lock(&parahotplug_request_list_lock);
1759
1760         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1761                 struct parahotplug_request *req =
1762                     list_entry(pos, struct parahotplug_request, list);
1763
1764                 if (!time_after_eq(jiffies, req->expiration))
1765                         continue;
1766
1767                 list_del(pos);
1768                 if (req->msg.hdr.flags.response_expected)
1769                         controlvm_respond(
1770                                 &req->msg.hdr,
1771                                 CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
1772                                 &req->msg.cmd.device_change_state.state);
1773                 parahotplug_request_destroy(req);
1774         }
1775
1776         spin_unlock(&parahotplug_request_list_lock);
1777 }
1778
/*
 * controlvm_periodic_work() - periodic poller for the controlvm channel
 * @work: the work_struct embedded in chipset_dev (unused directly)
 *
 * Drains the RESPONSE queue, then processes EVENT-queue messages until
 * the queue is empty or a message must be throttled (-EAGAIN), in which
 * case that message is saved and retried on the next pass.  Finally the
 * polling interval is adjusted (fast/slow) based on how recently a
 * message was seen, and the work re-schedules itself.
 */
static void
controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	int count = 0;
	int err;

	/* Drain the RESPONSE queue make it empty */
	do {
		err = visorchannel_signalremove(chipset_dev->controlvm_channel,
						CONTROLVM_QUEUE_RESPONSE,
						&inmsg);
	} while ((!err) && (++count < CONTROLVM_MESSAGE_MAX));

	/* -EAGAIN means the queue was emptied; anything else is abnormal */
	if (err != -EAGAIN)
		goto schedule_out;

	if (chipset_dev->controlvm_pending_msg_valid) {
		/*
		 * we throttled processing of a prior
		 * msg, so try to process it again
		 * rather than reading a new one
		 */
		inmsg = chipset_dev->controlvm_pending_msg;
		chipset_dev->controlvm_pending_msg_valid = false;
		err = 0;
	} else {
		err = read_controlvm_event(&inmsg);
	}

	/* process messages until the queue is empty or one is throttled */
	while (!err) {
		chipset_dev->most_recent_message_jiffies = jiffies;
		err = handle_command(inmsg,
				     visorchannel_get_physaddr
				     (chipset_dev->controlvm_channel));
		if (err == -EAGAIN) {
			/* save the message and retry it on the next pass */
			chipset_dev->controlvm_pending_msg = inmsg;
			chipset_dev->controlvm_pending_msg_valid = true;
			break;
		}

		err = read_controlvm_event(&inmsg);
	}

	/* parahotplug_worker */
	parahotplug_process_list();

schedule_out:
	if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
				(HZ * MIN_IDLE_SECONDS))) {
		/*
		 * it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (chipset_dev->poll_jiffies !=
					      POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			chipset_dev->poll_jiffies =
					      POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		if (chipset_dev->poll_jiffies !=
					      POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			chipset_dev->poll_jiffies =
					      POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}

	/* re-arm ourselves at the (possibly updated) polling rate */
	schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
			      chipset_dev->poll_jiffies);
}
1848
1849 static int
1850 visorchipset_init(struct acpi_device *acpi_device)
1851 {
1852         int err = -ENODEV;
1853         u64 addr;
1854         uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
1855         struct visorchannel *controlvm_channel;
1856
1857         addr = controlvm_get_channel_address();
1858         if (!addr)
1859                 goto error;
1860
1861         chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
1862         if (!chipset_dev)
1863                 goto error;
1864
1865         acpi_device->driver_data = chipset_dev;
1866
1867         chipset_dev->acpi_device = acpi_device;
1868         chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1869         controlvm_channel = visorchannel_create_with_lock(addr,
1870                                                           0, GFP_KERNEL, uuid);
1871
1872         if (!controlvm_channel)
1873                 goto error_free_chipset_dev;
1874
1875         chipset_dev->controlvm_channel = controlvm_channel;
1876
1877         err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
1878                                   visorchipset_dev_groups);
1879         if (err < 0)
1880                 goto error_destroy_channel;
1881
1882         if (!SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
1883                                 visorchannel_get_header(controlvm_channel)))
1884                 goto error_delete_groups;
1885
1886         /* if booting in a crash kernel */
1887         if (is_kdump_kernel())
1888                 INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
1889                                   setup_crash_devices_work_queue);
1890         else
1891                 INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
1892                                   controlvm_periodic_work);
1893
1894         chipset_dev->most_recent_message_jiffies = jiffies;
1895         chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1896         schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
1897                               chipset_dev->poll_jiffies);
1898
1899         POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, 0, DIAG_SEVERITY_PRINT);
1900
1901         err = visorbus_init();
1902         if (err < 0)
1903                 goto error_cancel_work;
1904
1905         return 0;
1906
1907 error_cancel_work:
1908         cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
1909
1910 error_delete_groups:
1911         sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
1912                             visorchipset_dev_groups);
1913
1914 error_destroy_channel:
1915         visorchannel_destroy(chipset_dev->controlvm_channel);
1916
1917 error_free_chipset_dev:
1918         kfree(chipset_dev);
1919
1920 error:
1921         POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR);
1922         return err;
1923 }
1924
1925 static int
1926 visorchipset_exit(struct acpi_device *acpi_device)
1927 {
1928         visorbus_exit();
1929         cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
1930         sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
1931                             visorchipset_dev_groups);
1932
1933         visorchannel_destroy(chipset_dev->controlvm_channel);
1934         kfree(chipset_dev);
1935
1936         return 0;
1937 }
1938
/* ACPI IDs this driver probes on; table is terminated by the empty entry */
static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};
1943
/* ACPI driver glue: binds visorchipset_init/_exit to the IDs above */
static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};
1954
1955 MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
1956
1957 static __init int visorutil_spar_detect(void)
1958 {
1959         unsigned int eax, ebx, ecx, edx;
1960
1961         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1962                 /* check the ID */
1963                 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
1964                 return  (ebx == UNISYS_SPAR_ID_EBX) &&
1965                         (ecx == UNISYS_SPAR_ID_ECX) &&
1966                         (edx == UNISYS_SPAR_ID_EDX);
1967         } else {
1968                 return 0;
1969         }
1970 }
1971
1972 static int init_unisys(void)
1973 {
1974         int result;
1975
1976         if (!visorutil_spar_detect())
1977                 return -ENODEV;
1978
1979         result = acpi_bus_register_driver(&unisys_acpi_driver);
1980         if (result)
1981                 return -ENODEV;
1982
1983         pr_info("Unisys Visorchipset Driver Loaded.\n");
1984         return 0;
1985 };
1986
/* module exit point: unregister the unisys ACPI driver */
static void exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}
1991
1992 module_init(init_unisys);
1993 module_exit(exit_unisys);
1994
1995 MODULE_AUTHOR("Unisys");
1996 MODULE_LICENSE("GPL");
1997 MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");