staging: unisys: visorbus: add error handling textid_show
[linux-2.6-block.git] / drivers / staging / unisys / visorbus / visorchipset.c
1 /* visorchipset_main.c
2  *
3  * Copyright (C) 2010 - 2015 UNISYS CORPORATION
4  * All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13  * NON INFRINGEMENT.  See the GNU General Public License for more
14  * details.
15  */
16
#include <linux/acpi.h>
#include <linux/crash_dump.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/nls.h>
#include <linux/string.h>
#include <linux/uuid.h>
25
26 #include "visorbus.h"
27 #include "visorbus_private.h"
28 #include "vmcallinterface.h"
29
30 #define CURRENT_FILE_PC VISOR_BUS_PC_visorchipset_c
31
32 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
33 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
34
35 #define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
36
37 #define UNISYS_SPAR_LEAF_ID 0x40000000
38
39 /* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
40 #define UNISYS_SPAR_ID_EBX 0x73696e55
41 #define UNISYS_SPAR_ID_ECX 0x70537379
42 #define UNISYS_SPAR_ID_EDX 0x34367261
43
44 /*
45  * When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
46  * we switch to slow polling mode. As soon as we get a controlvm
47  * message, we switch back to fast polling mode.
48  */
49 #define MIN_IDLE_SECONDS 10
50
51 struct parser_context {
52         unsigned long allocbytes;
53         unsigned long param_bytes;
54         u8 *curr;
55         unsigned long bytes_remaining;
56         bool byte_stream;
57         char data[0];
58 };
59
/* Driver-wide state for the visorchipset device. */
struct visorchipset_device {
	/* ACPI device this driver is bound to */
	struct acpi_device *acpi_device;
	/* current polling interval, in jiffies (fast or slow mode) */
	unsigned long poll_jiffies;
	/* when we got our last controlvm message */
	unsigned long most_recent_message_jiffies;
	/* periodic work item that polls the controlvm channel */
	struct delayed_work periodic_controlvm_work;
	/* channel over which controlvm messages are exchanged */
	struct visorchannel *controlvm_channel;
	/* payload bytes currently buffered by parser contexts; decremented
	 * in parser_done()
	 */
	unsigned long controlvm_payload_bytes_buffered;
	/*
	 * The following variables are used to handle the scenario where we are
	 * unable to offload the payload from a controlvm message due to memory
	 * requirements. In this scenario, we simply stash the controlvm
	 * message, then attempt to process it again the next time
	 * controlvm_periodic_work() runs.
	 */
	struct controlvm_message controlvm_pending_msg;
	bool controlvm_pending_msg_valid;
};
78
/* The single instance of driver state, shared by all handlers below. */
static struct visorchipset_device *chipset_dev;
80
/*
 * A pending parahotplug controlvm request.
 * NOTE(review): the code that queues and retires these lives outside this
 * chunk; fields documented from their names and types.
 */
struct parahotplug_request {
	struct list_head list;		/* linkage into a request list */
	int id;				/* identifier for this request */
	unsigned long expiration;	/* jiffies value, presumably a timeout */
	struct controlvm_message msg;	/* copy of the originating message */
};
87
/* sysfs attribute accessors for the chipset device */
89 static ssize_t toolaction_show(struct device *dev,
90                                struct device_attribute *attr,
91                                char *buf)
92 {
93         u8 tool_action = 0;
94         int err;
95
96         err = visorchannel_read(chipset_dev->controlvm_channel,
97                                 offsetof(struct spar_controlvm_channel_protocol,
98                                          tool_action),
99                                 &tool_action, sizeof(u8));
100         if (err)
101                 return err;
102
103         return sprintf(buf, "%u\n", tool_action);
104 }
105
106 static ssize_t toolaction_store(struct device *dev,
107                                 struct device_attribute *attr,
108                                 const char *buf, size_t count)
109 {
110         u8 tool_action;
111         int ret;
112
113         if (kstrtou8(buf, 10, &tool_action))
114                 return -EINVAL;
115
116         ret = visorchannel_write
117                 (chipset_dev->controlvm_channel,
118                  offsetof(struct spar_controlvm_channel_protocol,
119                           tool_action),
120                  &tool_action, sizeof(u8));
121
122         if (ret)
123                 return ret;
124         return count;
125 }
126 static DEVICE_ATTR_RW(toolaction);
127
128 static ssize_t boottotool_show(struct device *dev,
129                                struct device_attribute *attr,
130                                char *buf)
131 {
132         struct efi_spar_indication efi_spar_indication;
133         int err;
134
135         err = visorchannel_read(chipset_dev->controlvm_channel,
136                                 offsetof(struct spar_controlvm_channel_protocol,
137                                          efi_spar_ind),
138                                 &efi_spar_indication,
139                                 sizeof(struct efi_spar_indication));
140
141         if (err)
142                 return err;
143         return sprintf(buf, "%u\n", efi_spar_indication.boot_to_tool);
144 }
145
146 static ssize_t boottotool_store(struct device *dev,
147                                 struct device_attribute *attr,
148                                 const char *buf, size_t count)
149 {
150         int val, ret;
151         struct efi_spar_indication efi_spar_indication;
152
153         if (kstrtoint(buf, 10, &val))
154                 return -EINVAL;
155
156         efi_spar_indication.boot_to_tool = val;
157         ret = visorchannel_write
158                 (chipset_dev->controlvm_channel,
159                  offsetof(struct spar_controlvm_channel_protocol,
160                           efi_spar_ind), &(efi_spar_indication),
161                  sizeof(struct efi_spar_indication));
162
163         if (ret)
164                 return ret;
165         return count;
166 }
167 static DEVICE_ATTR_RW(boottotool);
168
169 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
170                           char *buf)
171 {
172         u32 error = 0;
173         int err;
174
175         err = visorchannel_read(chipset_dev->controlvm_channel,
176                                 offsetof(struct spar_controlvm_channel_protocol,
177                                          installation_error),
178                                 &error, sizeof(u32));
179         if (err)
180                 return err;
181         return sprintf(buf, "%i\n", error);
182 }
183
184 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
185                            const char *buf, size_t count)
186 {
187         u32 error;
188         int ret;
189
190         if (kstrtou32(buf, 10, &error))
191                 return -EINVAL;
192
193         ret = visorchannel_write
194                 (chipset_dev->controlvm_channel,
195                  offsetof(struct spar_controlvm_channel_protocol,
196                           installation_error),
197                  &error, sizeof(u32));
198         if (ret)
199                 return ret;
200         return count;
201 }
202 static DEVICE_ATTR_RW(error);
203
204 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
205                            char *buf)
206 {
207         u32 text_id = 0;
208         int err;
209
210         err = visorchannel_read
211                         (chipset_dev->controlvm_channel,
212                          offsetof(struct spar_controlvm_channel_protocol,
213                                   installation_text_id),
214                          &text_id, sizeof(u32));
215         if (err)
216                 return err;
217
218         return sprintf(buf, "%i\n", text_id);
219 }
220
221 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
222                             const char *buf, size_t count)
223 {
224         u32 text_id;
225         int ret;
226
227         if (kstrtou32(buf, 10, &text_id))
228                 return -EINVAL;
229
230         ret = visorchannel_write
231                 (chipset_dev->controlvm_channel,
232                  offsetof(struct spar_controlvm_channel_protocol,
233                           installation_text_id),
234                  &text_id, sizeof(u32));
235         if (ret)
236                 return ret;
237         return count;
238 }
239 static DEVICE_ATTR_RW(textid);
240
241 static ssize_t remaining_steps_show(struct device *dev,
242                                     struct device_attribute *attr, char *buf)
243 {
244         u16 remaining_steps = 0;
245
246         visorchannel_read(chipset_dev->controlvm_channel,
247                           offsetof(struct spar_controlvm_channel_protocol,
248                                    installation_remaining_steps),
249                           &remaining_steps, sizeof(u16));
250         return sprintf(buf, "%hu\n", remaining_steps);
251 }
252
253 static ssize_t remaining_steps_store(struct device *dev,
254                                      struct device_attribute *attr,
255                                      const char *buf, size_t count)
256 {
257         u16 remaining_steps;
258         int ret;
259
260         if (kstrtou16(buf, 10, &remaining_steps))
261                 return -EINVAL;
262
263         ret = visorchannel_write
264                 (chipset_dev->controlvm_channel,
265                  offsetof(struct spar_controlvm_channel_protocol,
266                           installation_remaining_steps),
267                  &remaining_steps, sizeof(u16));
268         if (ret)
269                 return ret;
270         return count;
271 }
272 static DEVICE_ATTR_RW(remaining_steps);
273
274 static uuid_le
275 parser_id_get(struct parser_context *ctx)
276 {
277         struct spar_controlvm_parameters_header *phdr = NULL;
278
279         phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
280         return phdr->id;
281 }
282
283 static void parser_done(struct parser_context *ctx)
284 {
285         chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
286         kfree(ctx);
287 }
288
289 static void *
290 parser_string_get(struct parser_context *ctx)
291 {
292         u8 *pscan;
293         unsigned long nscan;
294         int value_length = -1;
295         void *value = NULL;
296         int i;
297
298         pscan = ctx->curr;
299         nscan = ctx->bytes_remaining;
300         if (nscan == 0)
301                 return NULL;
302         if (!pscan)
303                 return NULL;
304         for (i = 0, value_length = -1; i < nscan; i++)
305                 if (pscan[i] == '\0') {
306                         value_length = i;
307                         break;
308                 }
309         if (value_length < 0)   /* '\0' was not included in the length */
310                 value_length = nscan;
311         value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
312         if (!value)
313                 return NULL;
314         if (value_length > 0)
315                 memcpy(value, pscan, value_length);
316         ((u8 *)(value))[value_length] = '\0';
317         return value;
318 }
319
320 static void *
321 parser_name_get(struct parser_context *ctx)
322 {
323         struct spar_controlvm_parameters_header *phdr = NULL;
324
325         phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
326
327         if (phdr->name_offset + phdr->name_length > ctx->param_bytes)
328                 return NULL;
329
330         ctx->curr = ctx->data + phdr->name_offset;
331         ctx->bytes_remaining = phdr->name_length;
332         return parser_string_get(ctx);
333 }
334
/* Bus/device number pair used as the lookup key by
 * match_visorbus_dev_by_id().
 */
struct visor_busdev {
	u32 bus_no;
	u32 dev_no;
};
339
340 static int match_visorbus_dev_by_id(struct device *dev, void *data)
341 {
342         struct visor_device *vdev = to_visor_device(dev);
343         struct visor_busdev *id = data;
344         u32 bus_no = id->bus_no;
345         u32 dev_no = id->dev_no;
346
347         if ((vdev->chipset_bus_no == bus_no) &&
348             (vdev->chipset_dev_no == dev_no))
349                 return 1;
350
351         return 0;
352 }
353
354 struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
355                                                struct visor_device *from)
356 {
357         struct device *dev;
358         struct device *dev_start = NULL;
359         struct visor_device *vdev = NULL;
360         struct visor_busdev id = {
361                         .bus_no = bus_no,
362                         .dev_no = dev_no
363                 };
364
365         if (from)
366                 dev_start = &from->device;
367         dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
368                               match_visorbus_dev_by_id);
369         if (dev)
370                 vdev = to_visor_device(dev);
371         return vdev;
372 }
373
374 static void
375 controlvm_init_response(struct controlvm_message *msg,
376                         struct controlvm_message_header *msg_hdr, int response)
377 {
378         memset(msg, 0, sizeof(struct controlvm_message));
379         memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
380         msg->hdr.payload_bytes = 0;
381         msg->hdr.payload_vm_offset = 0;
382         msg->hdr.payload_max_bytes = 0;
383         if (response < 0) {
384                 msg->hdr.flags.failed = 1;
385                 msg->hdr.completion_status = (u32)(-response);
386         }
387 }
388
389 static int
390 controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
391                                int response,
392                                enum ultra_chipset_feature features)
393 {
394         struct controlvm_message outmsg;
395
396         controlvm_init_response(&outmsg, msg_hdr, response);
397         outmsg.cmd.init_chipset.features = features;
398         return visorchannel_signalinsert(chipset_dev->controlvm_channel,
399                                          CONTROLVM_QUEUE_REQUEST, &outmsg);
400 }
401
/*
 * chipset_init() - handle the CONTROLVM_CHIPSET_INIT message
 * @inmsg: the incoming message carrying the peer's feature bits
 *
 * May only run once: a repeat attempt responds with
 * CONTROLVM_RESP_ALREADY_DONE (and, because the goto skips feature
 * negotiation, a features value of 0) and returns -EIO.
 *
 * Return: 0 on success, -EIO if already initialized, or the result of
 * sending the response when one was expected.
 */
static int
chipset_init(struct controlvm_message *inmsg)
{
	/* function-local static: survives across calls to enforce run-once */
	static int chipset_inited;
	enum ultra_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;	/* controlvm status for the response */
	int res = 0;				/* errno-style return value */

	POSTCODE_LINUX(CHIPSET_INIT_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ALREADY_DONE;
		res = -EIO;
		goto out_respond;
	}
	chipset_inited = 1;
	POSTCODE_LINUX(CHIPSET_INIT_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);

	/*
	 * Set features to indicate we support parahotplug (if Command
	 * also supports it).
	 */
	features = inmsg->cmd.init_chipset.features &
		   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

	/*
	 * Set the "reply" bit so Command knows this is a
	 * features-aware driver.
	 */
	features |= ULTRA_CHIPSET_FEATURE_REPLY;

out_respond:
	/* on success, res becomes the result of sending the response */
	if (inmsg->hdr.flags.response_expected)
		res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);

	return res;
}
438
439 static int
440 controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
441 {
442         struct controlvm_message outmsg;
443
444         controlvm_init_response(&outmsg, msg_hdr, response);
445         if (outmsg.hdr.flags.test_message == 1)
446                 return -EINVAL;
447
448         return visorchannel_signalinsert(chipset_dev->controlvm_channel,
449                                          CONTROLVM_QUEUE_REQUEST, &outmsg);
450 }
451
452 static int controlvm_respond_physdev_changestate(
453                 struct controlvm_message_header *msg_hdr, int response,
454                 struct spar_segment_state state)
455 {
456         struct controlvm_message outmsg;
457
458         controlvm_init_response(&outmsg, msg_hdr, response);
459         outmsg.cmd.device_change_state.state = state;
460         outmsg.cmd.device_change_state.flags.phys_device = 1;
461         return visorchannel_signalinsert(chipset_dev->controlvm_channel,
462                                          CONTROLVM_QUEUE_REQUEST, &outmsg);
463 }
464
/* Selects which saved-crash-message slot save_crash_message() writes. */
enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};
469
/*
 * save_crash_message() - stash a controlvm message in the channel's saved
 * crash-message area
 * @msg: message to save
 * @typ: CRASH_BUS or CRASH_DEV, selecting which slot to write
 *
 * The saved area (count and offset read from the channel header) holds
 * one bus message followed by one device message; presumably these are
 * replayed during crash/kdump handling — the consumer is outside this
 * file. Requires the channel to advertise exactly
 * CONTROLVM_CRASHMSG_MAX slots.
 *
 * Return: 0 on success (including an unknown @typ, which is only
 * logged — NOTE(review): arguably that should be an error), negative
 * errno on channel I/O failure.
 */
static int
save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
{
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;
	int err;

	/* how many crash-message slots does the channel provide? */
	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct spar_controlvm_channel_protocol,
					 saved_crash_message_count),
				&local_crash_msg_count, sizeof(u16));
	if (err) {
		POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
			       DIAG_SEVERITY_ERR);
		return err;
	}

	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
			       local_crash_msg_count,
			       DIAG_SEVERITY_ERR);
		return -EIO;
	}

	/* where in the channel does the saved area begin? */
	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct spar_controlvm_channel_protocol,
					 saved_crash_message_offset),
				&local_crash_msg_offset, sizeof(u32));
	if (err) {
		POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
			       DIAG_SEVERITY_ERR);
		return err;
	}

	switch (typ) {
	case CRASH_DEV:
		/* the device slot sits one message past the bus slot */
		local_crash_msg_offset += sizeof(struct controlvm_message);
		err = visorchannel_write(chipset_dev->controlvm_channel,
					 local_crash_msg_offset,
					 msg,
					 sizeof(struct controlvm_message));
		if (err) {
			POSTCODE_LINUX(SAVE_MSG_DEV_FAILURE_PC, 0, 0,
				       DIAG_SEVERITY_ERR);
			return err;
		}
		break;
	case CRASH_BUS:
		err = visorchannel_write(chipset_dev->controlvm_channel,
					 local_crash_msg_offset,
					 msg,
					 sizeof(struct controlvm_message));
		if (err) {
			POSTCODE_LINUX(SAVE_MSG_BUS_FAILURE_PC, 0, 0,
				       DIAG_SEVERITY_ERR);
			return err;
		}
		break;
	default:
		pr_info("Invalid crash_obj_type\n");
		break;
	}
	return 0;
}
534
535 static int
536 bus_responder(enum controlvm_id cmd_id,
537               struct controlvm_message_header *pending_msg_hdr,
538               int response)
539 {
540         if (!pending_msg_hdr)
541                 return -EIO;
542
543         if (pending_msg_hdr->id != (u32)cmd_id)
544                 return -EINVAL;
545
546         return controlvm_respond(pending_msg_hdr, response);
547 }
548
549 static int
550 device_changestate_responder(enum controlvm_id cmd_id,
551                              struct visor_device *p, int response,
552                              struct spar_segment_state response_state)
553 {
554         struct controlvm_message outmsg;
555         u32 bus_no = p->chipset_bus_no;
556         u32 dev_no = p->chipset_dev_no;
557
558         if (!p->pending_msg_hdr)
559                 return -EIO;
560         if (p->pending_msg_hdr->id != cmd_id)
561                 return -EINVAL;
562
563         controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
564
565         outmsg.cmd.device_change_state.bus_no = bus_no;
566         outmsg.cmd.device_change_state.dev_no = dev_no;
567         outmsg.cmd.device_change_state.state = response_state;
568
569         return visorchannel_signalinsert(chipset_dev->controlvm_channel,
570                                          CONTROLVM_QUEUE_REQUEST, &outmsg);
571 }
572
573 static int
574 device_responder(enum controlvm_id cmd_id,
575                  struct controlvm_message_header *pending_msg_hdr,
576                  int response)
577 {
578         if (!pending_msg_hdr)
579                 return -EIO;
580
581         if (pending_msg_hdr->id != (u32)cmd_id)
582                 return -EINVAL;
583
584         return controlvm_respond(pending_msg_hdr, response);
585 }
586
/*
 * bus_create() - handle a CONTROLVM bus-create message
 * @inmsg: message describing the new bus and its channel
 *
 * Allocates a visor_device for the bus, attaches its visorchannel, and
 * hands it to chipset_bus_create(), which sends the success response. On
 * failure, a failure response is sent here (when one was requested) and a
 * negative errno is returned.
 */
static int
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr = NULL;
	u32 bus_no = cmd->create_bus.bus_no;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int err;

	/* reject a second create for a bus that already exists */
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
			       DIAG_SEVERITY_ERR);
		err = -EEXIST;
		goto err_respond;
	}

	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
			       DIAG_SEVERITY_ERR);
		err = -ENOMEM;
		goto err_respond;
	}

	INIT_LIST_HEAD(&bus_info->list_all);
	bus_info->chipset_bus_no = bus_no;
	/* a bus is represented as a device with dev_no == BUS_ROOT_DEVICE */
	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;

	POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT);

	/* the s-Par I/O VM's bus carries the saved crash-bus message */
	if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) {
		err = save_crash_message(inmsg, CRASH_BUS);
		if (err)
			goto err_free_bus_info;
	}

	/* stash the header so the response can be sent asynchronously */
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr),
				   GFP_KERNEL);
		if (!pmsg_hdr) {
			POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
				       bus_info->chipset_bus_no,
				       DIAG_SEVERITY_ERR);
			err = -ENOMEM;
			goto err_free_bus_info;
		}

		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}

	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
					   cmd->create_bus.channel_bytes,
					   GFP_KERNEL,
					   cmd->create_bus.bus_data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
			       DIAG_SEVERITY_ERR);
		err = -ENOMEM;
		goto err_free_pending_msg;
	}
	bus_info->visorchannel = visorchannel;

	/* Response will be handled by chipset_bus_create */
	chipset_bus_create(bus_info);

	POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
	return 0;

err_free_pending_msg:
	kfree(bus_info->pending_msg_hdr);

err_free_bus_info:
	kfree(bus_info);

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
671
/*
 * bus_destroy() - handle a CONTROLVM bus-destroy message
 * @inmsg: message naming the bus to tear down
 *
 * Validates that the bus exists, was created, and has no response still
 * outstanding, then hands it to chipset_bus_destroy(), which sends the
 * response. On failure, a failure response is sent here (when one was
 * requested) and a negative errno is returned.
 */
static int
bus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr = NULL;
	u32 bus_no = cmd->destroy_bus.bus_no;
	struct visor_device *bus_info;
	int err;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		err = -ENODEV;
		goto err_respond;
	}
	if (bus_info->state.created == 0) {
		err = -ENOENT;
		goto err_respond;
	}
	if (bus_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		err = -EEXIST;
		goto err_respond;
	}
	/* stash the header so the response can be sent asynchronously */
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
				       bus_info->chipset_bus_no,
				       DIAG_SEVERITY_ERR);
			err = -ENOMEM;
			goto err_respond;
		}

		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}

	/* Response will be handled by chipset_bus_destroy */
	chipset_bus_destroy(bus_info);
	return 0;

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
719
/*
 * bus_configure() - handle a CONTROLVM bus-configure message
 * @inmsg:      message naming the bus and its guest handle
 * @parser_ctx: optional parsed parameter payload carrying the partition
 *              uuid and bus name
 *
 * Points the bus's channel at the client partition and records the
 * partition id/name from the payload. Unlike the create/destroy paths,
 * the (success or failure) response is sent synchronously here.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int
bus_configure(struct controlvm_message *inmsg,
	      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visor_device *bus_info;
	int err = 0;

	bus_no = cmd->configure_bus.bus_no;
	POSTCODE_LINUX(BUS_CONFIGURE_ENTRY_PC, 0, bus_no,
		       DIAG_SEVERITY_PRINT);

	/* the bus must exist, be created, and have no response outstanding */
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
			       DIAG_SEVERITY_ERR);
		err = -EINVAL;
		goto err_respond;
	} else if (bus_info->state.created == 0) {
		POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
			       DIAG_SEVERITY_ERR);
		err = -EINVAL;
		goto err_respond;
	} else if (bus_info->pending_msg_hdr) {
		POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
			       DIAG_SEVERITY_ERR);
		err = -EIO;
		goto err_respond;
	}

	err = visorchannel_set_clientpartition
		(bus_info->visorchannel,
		 cmd->configure_bus.guest_handle);
	if (err)
		goto err_respond;

	if (parser_ctx) {
		bus_info->partition_uuid = parser_id_get(parser_ctx);
		/* parser_name_get() kmallocs; presumably freed on teardown */
		bus_info->name = parser_name_get(parser_ctx);
	}

	POSTCODE_LINUX(BUS_CONFIGURE_EXIT_PC, 0, bus_no,
		       DIAG_SEVERITY_PRINT);

	/* success: respond with err == 0 */
	if (inmsg->hdr.flags.response_expected == 1)
		bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return 0;

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
774
775 static int
776 my_device_create(struct controlvm_message *inmsg)
777 {
778         struct controlvm_message_packet *cmd = &inmsg->cmd;
779         struct controlvm_message_header *pmsg_hdr = NULL;
780         u32 bus_no = cmd->create_device.bus_no;
781         u32 dev_no = cmd->create_device.dev_no;
782         struct visor_device *dev_info = NULL;
783         struct visor_device *bus_info;
784         struct visorchannel *visorchannel;
785         int err;
786
787         bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
788         if (!bus_info) {
789                 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
790                                DIAG_SEVERITY_ERR);
791                 err = -ENODEV;
792                 goto err_respond;
793         }
794
795         if (bus_info->state.created == 0) {
796                 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
797                                DIAG_SEVERITY_ERR);
798                 err = -EINVAL;
799                 goto err_respond;
800         }
801
802         dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
803         if (dev_info && (dev_info->state.created == 1)) {
804                 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
805                                DIAG_SEVERITY_ERR);
806                 err = -EEXIST;
807                 goto err_respond;
808         }
809
810         dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
811         if (!dev_info) {
812                 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
813                                DIAG_SEVERITY_ERR);
814                 err = -ENOMEM;
815                 goto err_respond;
816         }
817
818         dev_info->chipset_bus_no = bus_no;
819         dev_info->chipset_dev_no = dev_no;
820         dev_info->inst = cmd->create_device.dev_inst_uuid;
821
822         /* not sure where the best place to set the 'parent' */
823         dev_info->device.parent = &bus_info->device;
824
825         POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
826                        DIAG_SEVERITY_PRINT);
827
828         visorchannel =
829                visorchannel_create_with_lock(cmd->create_device.channel_addr,
830                                              cmd->create_device.channel_bytes,
831                                              GFP_KERNEL,
832                                              cmd->create_device.data_type_uuid);
833
834         if (!visorchannel) {
835                 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
836                                DIAG_SEVERITY_ERR);
837                 err = -ENOMEM;
838                 goto err_free_dev_info;
839         }
840         dev_info->visorchannel = visorchannel;
841         dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
842         if (uuid_le_cmp(cmd->create_device.data_type_uuid,
843                         spar_vhba_channel_protocol_uuid) == 0) {
844                 err = save_crash_message(inmsg, CRASH_DEV);
845                 if (err)
846                         goto err_free_dev_info;
847         }
848
849         if (inmsg->hdr.flags.response_expected == 1) {
850                 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
851                 if (!pmsg_hdr) {
852                         err = -ENOMEM;
853                         goto err_free_dev_info;
854                 }
855
856                 memcpy(pmsg_hdr, &inmsg->hdr,
857                        sizeof(struct controlvm_message_header));
858                 dev_info->pending_msg_hdr = pmsg_hdr;
859         }
860         /* Chipset_device_create will send response */
861         chipset_device_create(dev_info);
862         POSTCODE_LINUX(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
863                        DIAG_SEVERITY_PRINT);
864         return 0;
865
866 err_free_dev_info:
867         kfree(dev_info);
868
869 err_respond:
870         if (inmsg->hdr.flags.response_expected == 1)
871                 device_responder(inmsg->hdr.id, &inmsg->hdr, err);
872         return err;
873 }
874
/*
 * my_device_changestate() - handle a CONTROLVM_DEVICE_CHANGESTATE message
 *                           for a virtual (non-physical) device
 * @inmsg: message holding the bus/device numbers and the requested
 *         segment state
 *
 * If a response is expected, a copy of the message header is stashed in
 * dev_info->pending_msg_hdr; the actual response is sent later from
 * chipset_device_resume()/chipset_device_pause() via the corresponding
 * *_response() callbacks, which also free the saved header.
 *
 * Return: 0 on success, negative errno on failure (an error response is
 *         sent here if one was expected)
 */
static int
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr = NULL;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visor_device *dev_info;
	int err;

	/* the device must exist and be fully created */
	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info) {
		POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
			       DIAG_SEVERITY_ERR);
		err = -ENODEV;
		goto err_respond;
	}
	if (dev_info->state.created == 0) {
		POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
			       DIAG_SEVERITY_ERR);
		err = -EINVAL;
		goto err_respond;
	}
	if (dev_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		err = -EIO;
		goto err_respond;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		/* saved header is freed by the deferred response path */
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_respond;
		}

		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}

	if (state.alive == segment_state_running.alive &&
	    state.operating == segment_state_running.operating)
		/* Response will be sent from chipset_device_resume */
		chipset_device_resume(dev_info);
	/* ServerNotReady / ServerLost / SegmentStateStandby */
	else if (state.alive == segment_state_standby.alive &&
		 state.operating == segment_state_standby.operating)
		/*
		 * technically this is standby case where server is lost.
		 * Response will be sent from chipset_device_pause.
		 */
		chipset_device_pause(dev_info);
	/*
	 * NOTE(review): for any other state no handler runs; if a response
	 * was expected, pending_msg_hdr remains set and no response is ever
	 * sent — confirm this is intentional.
	 */
	return 0;

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		device_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
935
936 static int
937 my_device_destroy(struct controlvm_message *inmsg)
938 {
939         struct controlvm_message_packet *cmd = &inmsg->cmd;
940         struct controlvm_message_header *pmsg_hdr = NULL;
941         u32 bus_no = cmd->destroy_device.bus_no;
942         u32 dev_no = cmd->destroy_device.dev_no;
943         struct visor_device *dev_info;
944         int err;
945
946         dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
947         if (!dev_info) {
948                 err = -ENODEV;
949                 goto err_respond;
950         }
951         if (dev_info->state.created == 0) {
952                 err = -EINVAL;
953                 goto err_respond;
954         }
955
956         if (dev_info->pending_msg_hdr) {
957                 /* only non-NULL if dev is still waiting on a response */
958                 err = -EIO;
959                 goto err_respond;
960         }
961         if (inmsg->hdr.flags.response_expected == 1) {
962                 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
963                 if (!pmsg_hdr) {
964                         err = -ENOMEM;
965                         goto err_respond;
966                 }
967
968                 memcpy(pmsg_hdr, &inmsg->hdr,
969                        sizeof(struct controlvm_message_header));
970                 dev_info->pending_msg_hdr = pmsg_hdr;
971         }
972
973         chipset_device_destroy(dev_info);
974         return 0;
975
976 err_respond:
977         if (inmsg->hdr.flags.response_expected == 1)
978                 device_responder(inmsg->hdr.id, &inmsg->hdr, err);
979         return err;
980 }
981
982 /*
983  * The general parahotplug flow works as follows. The visorchipset receives
984  * a DEVICE_CHANGESTATE message from Command specifying a physical device
985  * to enable or disable. The CONTROLVM message handler calls
986  * parahotplug_process_message, which then adds the message to a global list
987  * and kicks off a udev event which causes a user level script to enable or
988  * disable the specified device. The udev script then writes to
989  * /sys/devices/platform/visorchipset/parahotplug, which causes the
990  * parahotplug store functions to get called, at which point the
991  * appropriate CONTROLVM message is retrieved from the list and responded
992  * to.
993  */
994
995 #define PARAHOTPLUG_TIMEOUT_MS 2000
996
997 /*
998  * parahotplug_next_id() - generate unique int to match an outstanding
999  *                         CONTROLVM message with a udev script /sys
1000  *                         response
1001  *
1002  * Return: a unique integer value
1003  */
1004 static int
1005 parahotplug_next_id(void)
1006 {
1007         static atomic_t id = ATOMIC_INIT(0);
1008
1009         return atomic_inc_return(&id);
1010 }
1011
1012 /*
1013  * parahotplug_next_expiration() - returns the time (in jiffies) when a
1014  *                                 CONTROLVM message on the list should expire
1015  *                                 -- PARAHOTPLUG_TIMEOUT_MS in the future
1016  *
1017  * Return: expected expiration time (in jiffies)
1018  */
1019 static unsigned long
1020 parahotplug_next_expiration(void)
1021 {
1022         return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1023 }
1024
1025 /*
1026  * parahotplug_request_create() - create a parahotplug_request, which is
1027  *                                basically a wrapper for a CONTROLVM_MESSAGE
1028  *                                that we can stick on a list
1029  * @msg: the message to insert in the request
1030  *
1031  * Return: the request containing the provided message
1032  */
1033 static struct parahotplug_request *
1034 parahotplug_request_create(struct controlvm_message *msg)
1035 {
1036         struct parahotplug_request *req;
1037
1038         req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
1039         if (!req)
1040                 return NULL;
1041
1042         req->id = parahotplug_next_id();
1043         req->expiration = parahotplug_next_expiration();
1044         req->msg = *msg;
1045
1046         return req;
1047 }
1048
1049 /*
1050  * parahotplug_request_destroy() - free a parahotplug_request
1051  * @req: the request to deallocate
1052  */
1053 static void
1054 parahotplug_request_destroy(struct parahotplug_request *req)
1055 {
1056         kfree(req);
1057 }
1058
/* list of requests awaiting a udev-script response, and its guard */
static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);  /* protects parahotplug_request_list */
1061
1062 /*
1063  * parahotplug_request_complete() - mark request as complete
1064  * @id:     the id of the request
1065  * @active: indicates whether the request is assigned to active partition
1066  *
1067  * Called from the /sys handler, which means the user script has
1068  * finished the enable/disable. Find the matching identifier, and
1069  * respond to the CONTROLVM message with success.
1070  *
1071  * Return: 0 on success or -EINVAL on failure
1072  */
1073 static int
1074 parahotplug_request_complete(int id, u16 active)
1075 {
1076         struct list_head *pos;
1077         struct list_head *tmp;
1078
1079         spin_lock(&parahotplug_request_list_lock);
1080
1081         /* Look for a request matching "id". */
1082         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1083                 struct parahotplug_request *req =
1084                     list_entry(pos, struct parahotplug_request, list);
1085                 if (req->id == id) {
1086                         /*
1087                          * Found a match. Remove it from the list and
1088                          * respond.
1089                          */
1090                         list_del(pos);
1091                         spin_unlock(&parahotplug_request_list_lock);
1092                         req->msg.cmd.device_change_state.state.active = active;
1093                         if (req->msg.hdr.flags.response_expected)
1094                                 controlvm_respond_physdev_changestate(
1095                                         &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1096                                         req->msg.cmd.device_change_state.state);
1097                         parahotplug_request_destroy(req);
1098                         return 0;
1099                 }
1100         }
1101
1102         spin_unlock(&parahotplug_request_list_lock);
1103         return -EINVAL;
1104 }
1105
1106 /*
1107  * devicedisabled_store() - disables the hotplug device
1108  * @dev:   sysfs interface variable not utilized in this function
1109  * @attr:  sysfs interface variable not utilized in this function
1110  * @buf:   buffer containing the device id
1111  * @count: the size of the buffer
1112  *
1113  * The parahotplug/devicedisabled interface gets called by our support script
1114  * when an SR-IOV device has been shut down. The ID is passed to the script
1115  * and then passed back when the device has been removed.
1116  *
1117  * Return: the size of the buffer for success or negative for error
1118  */
1119 static ssize_t devicedisabled_store(struct device *dev,
1120                                     struct device_attribute *attr,
1121                                     const char *buf, size_t count)
1122 {
1123         unsigned int id;
1124         int err;
1125
1126         if (kstrtouint(buf, 10, &id))
1127                 return -EINVAL;
1128
1129         err = parahotplug_request_complete(id, 0);
1130         if (err < 0)
1131                 return err;
1132         return count;
1133 }
1134 static DEVICE_ATTR_WO(devicedisabled);
1135
1136 /*
1137  * deviceenabled_store() - enables the hotplug device
1138  * @dev:   sysfs interface variable not utilized in this function
1139  * @attr:  sysfs interface variable not utilized in this function
1140  * @buf:   buffer containing the device id
1141  * @count: the size of the buffer
1142  *
1143  * The parahotplug/deviceenabled interface gets called by our support script
1144  * when an SR-IOV device has been recovered. The ID is passed to the script
1145  * and then passed back when the device has been brought back up.
1146  *
1147  * Return: the size of the buffer for success or negative for error
1148  */
1149 static ssize_t deviceenabled_store(struct device *dev,
1150                                    struct device_attribute *attr,
1151                                    const char *buf, size_t count)
1152 {
1153         unsigned int id;
1154
1155         if (kstrtouint(buf, 10, &id))
1156                 return -EINVAL;
1157
1158         parahotplug_request_complete(id, 1);
1159         return count;
1160 }
1161 static DEVICE_ATTR_WO(deviceenabled);
1162
/* attributes exposed in the "install" sysfs group */
static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static const struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

/* attributes exposed in the "parahotplug" sysfs group (udev handshake) */
static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

/* all sysfs groups registered on the visorchipset platform device */
static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_parahotplug_group,
	NULL
};
1193
1194 /*
1195  * parahotplug_request_kickoff() - initiate parahotplug request
1196  * @req: the request to initiate
1197  *
1198  * Cause uevent to run the user level script to do the disable/enable specified
1199  * in the parahotplug_request.
1200  */
1201 static int
1202 parahotplug_request_kickoff(struct parahotplug_request *req)
1203 {
1204         struct controlvm_message_packet *cmd = &req->msg.cmd;
1205         char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1206             env_func[40];
1207         char *envp[] = {
1208                 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1209         };
1210
1211         sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1212         sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1213         sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1214                 cmd->device_change_state.state.active);
1215         sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1216                 cmd->device_change_state.bus_no);
1217         sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1218                 cmd->device_change_state.dev_no >> 3);
1219         sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1220                 cmd->device_change_state.dev_no & 0x7);
1221
1222         return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1223                                   KOBJ_CHANGE, envp);
1224 }
1225
1226 /*
1227  * parahotplug_process_message() - enables or disables a PCI device by kicking
1228  *                                 off a udev script
1229  * @inmsg: the message indicating whether to enable or disable
1230  */
1231 static int
1232 parahotplug_process_message(struct controlvm_message *inmsg)
1233 {
1234         struct parahotplug_request *req;
1235         int err;
1236
1237         req = parahotplug_request_create(inmsg);
1238
1239         if (!req)
1240                 return -ENOMEM;
1241
1242         if (inmsg->cmd.device_change_state.state.active) {
1243                 /*
1244                  * For enable messages, just respond with success
1245                  * right away. This is a bit of a hack, but there are
1246                  * issues with the early enable messages we get (with
1247                  * either the udev script not detecting that the device
1248                  * is up, or not getting called at all). Fortunately
1249                  * the messages that get lost don't matter anyway, as
1250                  *
1251                  * devices are automatically enabled at
1252                  * initialization.
1253                  */
1254                 err = parahotplug_request_kickoff(req);
1255                 if (err)
1256                         goto err_respond;
1257                 controlvm_respond_physdev_changestate
1258                         (&inmsg->hdr,
1259                          CONTROLVM_RESP_SUCCESS,
1260                          inmsg->cmd.device_change_state.state);
1261                 parahotplug_request_destroy(req);
1262                 return 0;
1263         }
1264
1265         /*
1266          * For disable messages, add the request to the
1267          * request list before kicking off the udev script. It
1268          * won't get responded to until the script has
1269          * indicated it's done.
1270          */
1271         spin_lock(&parahotplug_request_list_lock);
1272         list_add_tail(&req->list, &parahotplug_request_list);
1273         spin_unlock(&parahotplug_request_list_lock);
1274
1275         err = parahotplug_request_kickoff(req);
1276         if (err)
1277                 goto err_respond;
1278         return 0;
1279
1280 err_respond:
1281         controlvm_respond_physdev_changestate
1282                                 (&inmsg->hdr, err,
1283                                  inmsg->cmd.device_change_state.state);
1284         return err;
1285 }
1286
1287 /*
1288  * chipset_ready_uevent() - sends chipset_ready action
1289  *
1290  * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1291  *
1292  * Return: 0 on success, negative on failure
1293  */
1294 static int
1295 chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
1296 {
1297         int res;
1298
1299         res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
1300                              KOBJ_ONLINE);
1301
1302         if (msg_hdr->flags.response_expected)
1303                 controlvm_respond(msg_hdr, res);
1304
1305         return res;
1306 }
1307
1308 /*
1309  * chipset_selftest_uevent() - sends chipset_selftest action
1310  *
1311  * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1312  *
1313  * Return: 0 on success, negative on failure
1314  */
1315 static int
1316 chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
1317 {
1318         char env_selftest[20];
1319         char *envp[] = { env_selftest, NULL };
1320         int res;
1321
1322         sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1323         res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1324                                  KOBJ_CHANGE, envp);
1325
1326         if (msg_hdr->flags.response_expected)
1327                 controlvm_respond(msg_hdr, res);
1328
1329         return res;
1330 }
1331
1332 /*
1333  * chipset_notready_uevent() - sends chipset_notready action
1334  *
1335  * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1336  *
1337  * Return: 0 on success, negative on failure
1338  */
1339 static int
1340 chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
1341 {
1342         int res;
1343
1344         res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
1345                              KOBJ_OFFLINE);
1346         if (msg_hdr->flags.response_expected)
1347                 controlvm_respond(msg_hdr, res);
1348
1349         return res;
1350 }
1351
/*
 * issue_vmcall_io_controlvm_addr() - ask the hypervisor for the controlvm
 *                                    channel's address and size
 * @control_addr:  filled with the channel address on success
 * @control_bytes: filled with the channel size in bytes on success
 *
 * The params struct lives on the stack and is handed to the hypervisor by
 * physical address; the outputs are copied out only when the vmcall
 * succeeded.
 *
 * Return: the raw vmcall result (VMCALL_SUCCESS on success)
 */
static unsigned int
issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
{
	struct vmcall_io_controlvm_addr_params params;
	int result = VMCALL_SUCCESS;
	u64 physaddr;

	physaddr = virt_to_phys(&params);
	/* ISSUE_IO_VMCALL updates 'result' in place */
	ISSUE_IO_VMCALL(VMCALL_CONTROLVM_ADDR, physaddr, result);
	if (VMCALL_SUCCESSFUL(result)) {
		*control_addr = params.address;
		*control_bytes = params.channel_bytes;
	}
	return result;
}
1367
1368 static u64 controlvm_get_channel_address(void)
1369 {
1370         u64 addr = 0;
1371         u32 size = 0;
1372
1373         if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
1374                 return 0;
1375
1376         return addr;
1377 }
1378
/*
 * setup_crash_devices_work_queue() - re-establish the storage bus and
 *                                    device needed to write out a crash dump
 * @work: the work item that scheduled us (unused)
 *
 * Runs in the crash (kdump) kernel: reads the create-bus and create-device
 * messages that were saved in the controlvm channel and replays them so the
 * dump has a storage device to land on. Each failure is reported via a
 * POSTCODE and aborts the sequence.
 */
static void
setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	POSTCODE_LINUX(CRASH_DEV_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);

	/* send init chipset msg */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	/* NOTE(review): bus_count = 23 looks like a magic number — confirm */
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	chipset_init(&msg);

	/* get saved message count */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
			       DIAG_SEVERITY_ERR);
		return;
	}

	/* the channel must hold exactly the expected number of messages */
	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
			       local_crash_msg_count,
			       DIAG_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
			       DIAG_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage bus offset */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX(CRASH_DEV_RD_BUS_FAILURE_PC, 0, 0,
			       DIAG_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage device */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX(CRASH_DEV_RD_DEV_FAILURE_PC, 0, 0,
			       DIAG_SEVERITY_ERR);
		return;
	}

	/* reuse IOVM create bus message */
	if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
		bus_create(&local_crash_bus_msg);
	} else {
		POSTCODE_LINUX(CRASH_DEV_BUS_NULL_FAILURE_PC, 0, 0,
			       DIAG_SEVERITY_ERR);
		return;
	}

	/* reuse create device message for storage device */
	if (local_crash_dev_msg.cmd.create_device.channel_addr) {
		my_device_create(&local_crash_dev_msg);
	} else {
		POSTCODE_LINUX(CRASH_DEV_DEV_NULL_FAILURE_PC, 0, 0,
			       DIAG_SEVERITY_ERR);
		return;
	}
	POSTCODE_LINUX(CRASH_DEV_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
}
1464
/*
 * bus_create_response() - complete a pending CONTROLVM_BUS_CREATE message
 * @bus_info: the bus the response pertains to
 * @response: the handler's result; >= 0 means the bus was created
 *
 * Sends the deferred response, then frees the saved message header so no
 * response is considered pending afterward.
 */
void
bus_create_response(struct visor_device *bus_info, int response)
{
	if (response >= 0)
		bus_info->state.created = 1;

	bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
		      response);

	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
}
1477
/*
 * bus_destroy_response() - complete a pending CONTROLVM_BUS_DESTROY message
 * @bus_info: the bus the response pertains to
 * @response: the handler's result
 *
 * Sends the deferred response, then frees the saved message header.
 */
void
bus_destroy_response(struct visor_device *bus_info, int response)
{
	bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
		      response);

	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
}
1487
/*
 * device_create_response() - complete a pending CONTROLVM_DEVICE_CREATE
 *                            message
 * @dev_info: the device the response pertains to
 * @response: the handler's result; >= 0 means the device was created
 *
 * Sends the deferred response, then frees the saved message header.
 */
void
device_create_response(struct visor_device *dev_info, int response)
{
	if (response >= 0)
		dev_info->state.created = 1;

	device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
			 response);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}
1500
/*
 * device_destroy_response() - complete a pending CONTROLVM_DEVICE_DESTROY
 *                             message
 * @dev_info: the device the response pertains to
 * @response: the handler's result
 *
 * Sends the deferred response, then frees the saved message header.
 */
void
device_destroy_response(struct visor_device *dev_info, int response)
{
	device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
			 response);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}
1510
/*
 * device_pause_response() - complete a pending pause (changestate to
 *                           standby) request
 * @dev_info: the device the response pertains to
 * @response: the handler's result
 *
 * Responds with the standby segment state, then frees the saved header.
 */
void
device_pause_response(struct visor_device *dev_info,
		      int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_standby);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}
1522
/*
 * device_resume_response() - complete a pending resume (changestate to
 *                            running) request
 * @dev_info: the device the response pertains to
 * @response: the handler's result
 *
 * Responds with the running segment state, then frees the saved header.
 */
void
device_resume_response(struct visor_device *dev_info, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_running);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}
1533
1534 static struct parser_context *
1535 parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
1536 {
1537         int allocbytes = sizeof(struct parser_context) + bytes;
1538         struct parser_context *ctx;
1539
1540         *retry = false;
1541
1542         /*
1543          * alloc an 0 extra byte to ensure payload is
1544          * '\0'-terminated
1545          */
1546         allocbytes++;
1547         if ((chipset_dev->controlvm_payload_bytes_buffered + bytes)
1548             > MAX_CONTROLVM_PAYLOAD_BYTES) {
1549                 *retry = true;
1550                 return NULL;
1551         }
1552         ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
1553         if (!ctx) {
1554                 *retry = true;
1555                 return NULL;
1556         }
1557
1558         ctx->allocbytes = allocbytes;
1559         ctx->param_bytes = bytes;
1560         ctx->curr = NULL;
1561         ctx->bytes_remaining = 0;
1562         ctx->byte_stream = false;
1563         if (local) {
1564                 void *p;
1565
1566                 if (addr > virt_to_phys(high_memory - 1))
1567                         goto err_finish_ctx;
1568                 p = __va((unsigned long)(addr));
1569                 memcpy(ctx->data, p, bytes);
1570         } else {
1571                 void *mapping = memremap(addr, bytes, MEMREMAP_WB);
1572
1573                 if (!mapping)
1574                         goto err_finish_ctx;
1575                 memcpy(ctx->data, mapping, bytes);
1576                 memunmap(mapping);
1577         }
1578
1579         ctx->byte_stream = true;
1580         chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
1581
1582         return ctx;
1583
1584 err_finish_ctx:
1585         parser_done(ctx);
1586         return NULL;
1587 }
1588
1589 /*
1590  * handle_command() - process a controlvm message
1591  * @inmsg:        the message to process
1592  * @channel_addr: address of the controlvm channel
1593  *
1594  * Return:
1595  *      0       - Successfully processed the message
1596  *      -EAGAIN - ControlVM message was not processed and should be retried
1597  *                reading the next controlvm message; a scenario where this can
1598  *                occur is when we need to throttle the allocation of memory in
1599  *                which to copy out controlvm payload data.
1600  *      < 0     - error: ControlVM message was processed but an error occurred.
1601  */
1602 static int
1603 handle_command(struct controlvm_message inmsg, u64 channel_addr)
1604 {
1605         struct controlvm_message_packet *cmd = &inmsg.cmd;
1606         u64 parm_addr;
1607         u32 parm_bytes;
1608         struct parser_context *parser_ctx = NULL;
1609         bool local_addr;
1610         struct controlvm_message ackmsg;
1611         int err = 0;
1612
1613         /* create parsing context if necessary */
1614         local_addr = (inmsg.hdr.flags.test_message == 1);
1615         if (channel_addr == 0)
1616                 return -EINVAL;
1617
1618         parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1619         parm_bytes = inmsg.hdr.payload_bytes;
1620
1621         /*
1622          * Parameter and channel addresses within test messages actually lie
1623          * within our OS-controlled memory. We need to know that, because it
1624          * makes a difference in how we compute the virtual address.
1625          */
1626         if (parm_addr && parm_bytes) {
1627                 bool retry = false;
1628
1629                 parser_ctx =
1630                     parser_init_byte_stream(parm_addr, parm_bytes,
1631                                             local_addr, &retry);
1632                 if (!parser_ctx && retry)
1633                         return -EAGAIN;
1634         }
1635
1636         if (!local_addr) {
1637                 controlvm_init_response(&ackmsg, &inmsg.hdr,
1638                                         CONTROLVM_RESP_SUCCESS);
1639                 err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
1640                                                 CONTROLVM_QUEUE_ACK,
1641                                                 &ackmsg);
1642                 if (err)
1643                         return err;
1644         }
1645         switch (inmsg.hdr.id) {
1646         case CONTROLVM_CHIPSET_INIT:
1647                 err = chipset_init(&inmsg);
1648                 break;
1649         case CONTROLVM_BUS_CREATE:
1650                 err = bus_create(&inmsg);
1651                 break;
1652         case CONTROLVM_BUS_DESTROY:
1653                 err = bus_destroy(&inmsg);
1654                 break;
1655         case CONTROLVM_BUS_CONFIGURE:
1656                 err = bus_configure(&inmsg, parser_ctx);
1657                 break;
1658         case CONTROLVM_DEVICE_CREATE:
1659                 err = my_device_create(&inmsg);
1660                 break;
1661         case CONTROLVM_DEVICE_CHANGESTATE:
1662                 if (cmd->device_change_state.flags.phys_device) {
1663                         err = parahotplug_process_message(&inmsg);
1664                 } else {
1665                         /*
1666                          * save the hdr and cmd structures for later use
1667                          * when sending back the response to Command
1668                          */
1669                         err = my_device_changestate(&inmsg);
1670                         break;
1671                 }
1672                 break;
1673         case CONTROLVM_DEVICE_DESTROY:
1674                 err = my_device_destroy(&inmsg);
1675                 break;
1676         case CONTROLVM_DEVICE_CONFIGURE:
1677                 /* no op just send a respond that we passed */
1678                 if (inmsg.hdr.flags.response_expected)
1679                         controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1680                 break;
1681         case CONTROLVM_CHIPSET_READY:
1682                 err = chipset_ready_uevent(&inmsg.hdr);
1683                 break;
1684         case CONTROLVM_CHIPSET_SELFTEST:
1685                 err = chipset_selftest_uevent(&inmsg.hdr);
1686                 break;
1687         case CONTROLVM_CHIPSET_STOP:
1688                 err = chipset_notready_uevent(&inmsg.hdr);
1689                 break;
1690         default:
1691                 err = -ENOMSG;
1692                 if (inmsg.hdr.flags.response_expected)
1693                         controlvm_respond(&inmsg.hdr,
1694                                           -CONTROLVM_RESP_ID_UNKNOWN);
1695                 break;
1696         }
1697
1698         if (parser_ctx) {
1699                 parser_done(parser_ctx);
1700                 parser_ctx = NULL;
1701         }
1702         return err;
1703 }
1704
1705 /*
 * read_controlvm_event() - retrieves the next message from the
1707  *                          CONTROLVM_QUEUE_EVENT queue in the controlvm
1708  *                          channel
1709  * @msg: pointer to the retrieved message
1710  *
1711  * Return: 0 if valid message was retrieved or -error
1712  */
1713 static int
1714 read_controlvm_event(struct controlvm_message *msg)
1715 {
1716         int err;
1717
1718         err = visorchannel_signalremove(chipset_dev->controlvm_channel,
1719                                         CONTROLVM_QUEUE_EVENT, msg);
1720         if (err)
1721                 return err;
1722
1723         /* got a message */
1724         if (msg->hdr.flags.test_message == 1)
1725                 return -EINVAL;
1726
1727         return 0;
1728 }
1729
1730 /*
1731  * parahotplug_process_list() - remove any request from the list that's been on
1732  *                              there too long and respond with an error
1733  */
1734 static void
1735 parahotplug_process_list(void)
1736 {
1737         struct list_head *pos;
1738         struct list_head *tmp;
1739
1740         spin_lock(&parahotplug_request_list_lock);
1741
1742         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1743                 struct parahotplug_request *req =
1744                     list_entry(pos, struct parahotplug_request, list);
1745
1746                 if (!time_after_eq(jiffies, req->expiration))
1747                         continue;
1748
1749                 list_del(pos);
1750                 if (req->msg.hdr.flags.response_expected)
1751                         controlvm_respond_physdev_changestate(
1752                                 &req->msg.hdr,
1753                                 CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
1754                                 req->msg.cmd.device_change_state.state);
1755                 parahotplug_request_destroy(req);
1756         }
1757
1758         spin_unlock(&parahotplug_request_list_lock);
1759 }
1760
/*
 * controlvm_periodic_work() - periodic worker servicing the controlvm channel
 * @work: embedded in chipset_dev->periodic_controlvm_work (not used directly)
 *
 * Drains the RESPONSE queue, then processes either a previously throttled
 * message or fresh EVENT-queue messages until the queue is exhausted or a
 * message is throttled again.  Stale hotplug requests are expired, the
 * polling interval is adjusted based on channel idleness, and the work
 * reschedules itself.
 */
static void
controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	int err;

	/* Drain the RESPONSE queue make it empty */
	do {
		err = visorchannel_signalremove(chipset_dev->controlvm_channel,
						CONTROLVM_QUEUE_RESPONSE,
						&inmsg);
	} while (!err);

	/* NOTE(review): -EAGAIN presumably means the queue is simply empty;
	 * any other error skips message handling for this pass
	 */
	if (err != -EAGAIN)
		goto schedule_out;

	if (chipset_dev->controlvm_pending_msg_valid) {
		/*
		 * we throttled processing of a prior
		 * msg, so try to process it again
		 * rather than reading a new one
		 */
		inmsg = chipset_dev->controlvm_pending_msg;
		chipset_dev->controlvm_pending_msg_valid = false;
		err = 0;
	} else {
		err = read_controlvm_event(&inmsg);
	}

	while (!err) {
		chipset_dev->most_recent_message_jiffies = jiffies;
		err = handle_command(inmsg,
				     visorchannel_get_physaddr
				     (chipset_dev->controlvm_channel));
		/* -EAGAIN from handle_command means "throttled": stash the
		 * message so the next pass retries it instead of reading
		 * a new one
		 */
		if (err == -EAGAIN) {
			chipset_dev->controlvm_pending_msg = inmsg;
			chipset_dev->controlvm_pending_msg_valid = true;
			break;
		}

		err = read_controlvm_event(&inmsg);
	}

	/* parahotplug_worker */
	parahotplug_process_list();

schedule_out:
	if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
				(HZ * MIN_IDLE_SECONDS))) {
		/*
		 * it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (chipset_dev->poll_jiffies !=
					      POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			chipset_dev->poll_jiffies =
					      POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		if (chipset_dev->poll_jiffies !=
					      POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			chipset_dev->poll_jiffies =
					      POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}

	/* re-arm ourselves at the (possibly updated) polling interval */
	schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
			      chipset_dev->poll_jiffies);
}
1829
1830 static int
1831 visorchipset_init(struct acpi_device *acpi_device)
1832 {
1833         int err = -ENODEV;
1834         u64 addr;
1835         uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
1836         struct visorchannel *controlvm_channel;
1837
1838         addr = controlvm_get_channel_address();
1839         if (!addr)
1840                 goto error;
1841
1842         chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
1843         if (!chipset_dev)
1844                 goto error;
1845
1846         acpi_device->driver_data = chipset_dev;
1847
1848         chipset_dev->acpi_device = acpi_device;
1849         chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1850         controlvm_channel = visorchannel_create_with_lock(addr,
1851                                                           0, GFP_KERNEL, uuid);
1852
1853         if (!controlvm_channel)
1854                 goto error_free_chipset_dev;
1855
1856         chipset_dev->controlvm_channel = controlvm_channel;
1857
1858         err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
1859                                   visorchipset_dev_groups);
1860         if (err < 0)
1861                 goto error_destroy_channel;
1862
1863         if (!SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
1864                                 visorchannel_get_header(controlvm_channel)))
1865                 goto error_delete_groups;
1866
1867         /* if booting in a crash kernel */
1868         if (is_kdump_kernel())
1869                 INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
1870                                   setup_crash_devices_work_queue);
1871         else
1872                 INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
1873                                   controlvm_periodic_work);
1874
1875         chipset_dev->most_recent_message_jiffies = jiffies;
1876         chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1877         schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
1878                               chipset_dev->poll_jiffies);
1879
1880         POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, 0, DIAG_SEVERITY_PRINT);
1881
1882         err = visorbus_init();
1883         if (err < 0)
1884                 goto error_cancel_work;
1885
1886         return 0;
1887
1888 error_cancel_work:
1889         cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
1890
1891 error_delete_groups:
1892         sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
1893                             visorchipset_dev_groups);
1894
1895 error_destroy_channel:
1896         visorchannel_destroy(chipset_dev->controlvm_channel);
1897
1898 error_free_chipset_dev:
1899         kfree(chipset_dev);
1900
1901 error:
1902         POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR);
1903         return err;
1904 }
1905
/*
 * visorchipset_exit() - ACPI remove callback; releases everything acquired
 *                       by visorchipset_init() in reverse order
 * @acpi_device: the device being removed (driver state lives in chipset_dev)
 *
 * Return: always 0.
 */
static int
visorchipset_exit(struct acpi_device *acpi_device)
{
	POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);

	/* stop the bus layer, then the periodic worker, before tearing
	 * down sysfs and the channel
	 */
	visorbus_exit();
	cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
	sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
			    visorchipset_dev_groups);

	visorchannel_destroy(chipset_dev->controlvm_channel);
	kfree(chipset_dev);

	POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);

	return 0;
}
1923
/* ACPI device IDs this driver binds to; the empty entry terminates the
 * table
 */
static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};
1928
/* ACPI driver glue: routes device add/remove to visorchipset_init/exit */
static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
		},
};

/* expose the ID table so the module can be autoloaded on a match */
MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
1941
1942 static __init uint32_t visorutil_spar_detect(void)
1943 {
1944         unsigned int eax, ebx, ecx, edx;
1945
1946         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1947                 /* check the ID */
1948                 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
1949                 return  (ebx == UNISYS_SPAR_ID_EBX) &&
1950                         (ecx == UNISYS_SPAR_ID_ECX) &&
1951                         (edx == UNISYS_SPAR_ID_EDX);
1952         } else {
1953                 return 0;
1954         }
1955 }
1956
1957 static int init_unisys(void)
1958 {
1959         int result;
1960
1961         if (!visorutil_spar_detect())
1962                 return -ENODEV;
1963
1964         result = acpi_bus_register_driver(&unisys_acpi_driver);
1965         if (result)
1966                 return -ENODEV;
1967
1968         pr_info("Unisys Visorchipset Driver Loaded.\n");
1969         return 0;
1970 };
1971
/* exit_unisys() - module exit point; unregisters the ACPI driver */
static void exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}
1976
/* module entry/exit hooks and standard module metadata */
module_init(init_unisys);
module_exit(exit_unisys);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");