3 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
17 #include <linux/acpi.h>
18 #include <linux/ctype.h>
21 #include <linux/nls.h>
22 #include <linux/netdevice.h>
23 #include <linux/uuid.h>
24 #include <linux/crash_dump.h>
27 #include "visorbus_private.h"
28 #include "vmcallinterface.h"
30 #define CURRENT_FILE_PC VISOR_BUS_PC_visorchipset_c
/* controlvm channel polling periods, in jiffies */
32 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
33 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
/* upper bound on a controlvm message payload we will buffer locally */
35 #define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
/* CPUID leaf used to detect that we are running under s-Par */
37 #define UNISYS_SPAR_LEAF_ID 0x40000000
39 /* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
40 #define UNISYS_SPAR_ID_EBX 0x73696e55
41 #define UNISYS_SPAR_ID_ECX 0x70537379
42 #define UNISYS_SPAR_ID_EDX 0x34367261
45 * When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
46 * we switch to slow polling mode. As soon as we get a controlvm
47 * message, we switch back to fast polling mode.
49 #define MIN_IDLE_SECONDS 10
/*
 * Per-message state for walking a controlvm parameter payload.
 * NOTE(review): members used elsewhere (e.g. the data/curr cursors read
 * by parser_string_get()/parser_name_get()) are not visible in this
 * excerpt — confirm against the full file.
 */
51 struct parser_context {
52 unsigned long allocbytes;
53 unsigned long param_bytes;
55 unsigned long bytes_remaining;
/* Driver-global state for the single visorchipset platform instance. */
60 struct visorchipset_device {
61 struct acpi_device *acpi_device;
62 unsigned long poll_jiffies;
63 /* when we got our last controlvm message */
64 unsigned long most_recent_message_jiffies;
65 struct delayed_work periodic_controlvm_work;
66 struct visorchannel *controlvm_channel;
67 unsigned long controlvm_payload_bytes_buffered;
69 * The following variables are used to handle the scenario where we are
70 * unable to offload the payload from a controlvm message due to memory
71 * requirements. In this scenario, we simply stash the controlvm
72 * message, then attempt to process it again the next time
73 * controlvm_periodic_work() runs.
75 struct controlvm_message controlvm_pending_msg;
76 bool controlvm_pending_msg_valid;
/* the one-and-only instance; allocated at probe time (not visible here) */
79 static struct visorchipset_device *chipset_dev;
/*
 * A queued DEVICE_CHANGESTATE message awaiting completion by the
 * user-level parahotplug udev script; lives on parahotplug_request_list.
 */
81 struct parahotplug_request {
82 struct list_head list;
/* unique id matched against the value the script writes back via sysfs;
 * NOTE(review): the id member itself is not visible in this excerpt */
84 unsigned long expiration;
85 struct controlvm_message msg;
88 /* prototypes for attributes */
/*
 * toolaction_show() - sysfs read: report the tool_action byte stored in
 * the controlvm channel.  NOTE(review): the tool_action declaration and
 * the visorchannel_read() error check are not visible in this excerpt.
 */
89 static ssize_t toolaction_show(struct device *dev,
90 struct device_attribute *attr,
96 err = visorchannel_read(chipset_dev->controlvm_channel,
97 offsetof(struct spar_controlvm_channel_protocol,
99 &tool_action, sizeof(u8));
103 return sprintf(buf, "%u\n", tool_action);
/*
 * toolaction_store() - sysfs write: parse a decimal u8 from @buf and
 * store it as tool_action in the controlvm channel.
 * NOTE(review): the -EINVAL return after kstrtou8() failure and the
 * final return of @count are not visible in this excerpt.
 */
106 static ssize_t toolaction_store(struct device *dev,
107 struct device_attribute *attr,
108 const char *buf, size_t count)
113 if (kstrtou8(buf, 10, &tool_action))
116 err = visorchannel_write
117 (chipset_dev->controlvm_channel,
118 offsetof(struct spar_controlvm_channel_protocol,
120 &tool_action, sizeof(u8));
126 static DEVICE_ATTR_RW(toolaction);
/*
 * boottotool_show() - sysfs read: report the boot_to_tool flag from the
 * channel's efi_spar_indication structure.
 */
128 static ssize_t boottotool_show(struct device *dev,
129 struct device_attribute *attr,
132 struct efi_spar_indication efi_spar_indication;
135 err = visorchannel_read(chipset_dev->controlvm_channel,
136 offsetof(struct spar_controlvm_channel_protocol,
138 &efi_spar_indication,
139 sizeof(struct efi_spar_indication));
143 return sprintf(buf, "%u\n", efi_spar_indication.boot_to_tool);
/*
 * boottotool_store() - sysfs write: parse an int from @buf and store it
 * as the boot_to_tool flag in the channel's efi_spar_indication.
 * NOTE(review): only boot_to_tool is assigned here; the remaining
 * efi_spar_indication fields appear uninitialized in this excerpt —
 * confirm the full file zeroes or preserves them before the write.
 */
146 static ssize_t boottotool_store(struct device *dev,
147 struct device_attribute *attr,
148 const char *buf, size_t count)
151 struct efi_spar_indication efi_spar_indication;
153 if (kstrtoint(buf, 10, &val))
156 efi_spar_indication.boot_to_tool = val;
157 err = visorchannel_write
158 (chipset_dev->controlvm_channel,
159 offsetof(struct spar_controlvm_channel_protocol,
160 efi_spar_ind), &(efi_spar_indication),
161 sizeof(struct efi_spar_indication));
167 static DEVICE_ATTR_RW(boottotool);
/*
 * error_show() - sysfs read: report the installation error field (u32)
 * from the controlvm channel.
 */
169 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
175 err = visorchannel_read(chipset_dev->controlvm_channel,
176 offsetof(struct spar_controlvm_channel_protocol,
178 &error, sizeof(u32));
181 return sprintf(buf, "%i\n", error);
/*
 * error_store() - sysfs write: parse a decimal u32 from @buf and store
 * it as the installation error field in the controlvm channel.
 */
184 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
185 const char *buf, size_t count)
190 if (kstrtou32(buf, 10, &error))
193 err = visorchannel_write
194 (chipset_dev->controlvm_channel,
195 offsetof(struct spar_controlvm_channel_protocol,
197 &error, sizeof(u32));
202 static DEVICE_ATTR_RW(error);
/*
 * textid_show() - sysfs read: report installation_text_id (u32) from
 * the controlvm channel.
 */
204 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
210 err = visorchannel_read
211 (chipset_dev->controlvm_channel,
212 offsetof(struct spar_controlvm_channel_protocol,
213 installation_text_id),
214 &text_id, sizeof(u32));
218 return sprintf(buf, "%i\n", text_id);
/*
 * textid_store() - sysfs write: parse a decimal u32 from @buf and store
 * it as installation_text_id in the controlvm channel.
 */
221 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
222 const char *buf, size_t count)
227 if (kstrtou32(buf, 10, &text_id))
230 err = visorchannel_write
231 (chipset_dev->controlvm_channel,
232 offsetof(struct spar_controlvm_channel_protocol,
233 installation_text_id),
234 &text_id, sizeof(u32));
239 static DEVICE_ATTR_RW(textid);
/*
 * remaining_steps_show() - sysfs read: report the u16
 * installation_remaining_steps counter from the controlvm channel.
 */
241 static ssize_t remaining_steps_show(struct device *dev,
242 struct device_attribute *attr, char *buf)
244 u16 remaining_steps = 0;
247 err = visorchannel_read(chipset_dev->controlvm_channel,
248 offsetof(struct spar_controlvm_channel_protocol,
249 installation_remaining_steps),
250 &remaining_steps, sizeof(u16));
254 return sprintf(buf, "%hu\n", remaining_steps);
/*
 * remaining_steps_store() - sysfs write: parse a decimal u16 from @buf
 * and store it as installation_remaining_steps in the controlvm channel.
 */
257 static ssize_t remaining_steps_store(struct device *dev,
258 struct device_attribute *attr,
259 const char *buf, size_t count)
264 if (kstrtou16(buf, 10, &remaining_steps))
267 err = visorchannel_write
268 (chipset_dev->controlvm_channel,
269 offsetof(struct spar_controlvm_channel_protocol,
270 installation_remaining_steps),
271 &remaining_steps, sizeof(u16));
276 static DEVICE_ATTR_RW(remaining_steps);
/*
 * parser_id_get() - return the id field from the parameter header at the
 * start of the parser context's data buffer.
 * NOTE(review): the return statement is not visible in this excerpt;
 * presumably it returns phdr->id — confirm against the full file.
 */
279 parser_id_get(struct parser_context *ctx)
281 struct spar_controlvm_parameters_header *phdr = NULL;
283 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
/*
 * parser_done() - release a parser context, crediting its bytes back to
 * the global buffered-payload accounting.  NOTE(review): the kfree of
 * @ctx itself is not visible in this excerpt.
 */
287 static void parser_done(struct parser_context *ctx)
289 chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
/*
 * parser_string_get() - copy the remaining parser bytes into a freshly
 * kmalloc'd, NUL-terminated string.  Scans for an embedded '\0'; if none
 * is found within bytes_remaining, the whole region is used and a
 * terminator is appended.  Caller owns (and must kfree) the result.
 * NOTE(review): the pscan initialization, the NULL check after kmalloc,
 * and the return statement are not visible in this excerpt.
 */
294 parser_string_get(struct parser_context *ctx)
298 int value_length = -1;
303 nscan = ctx->bytes_remaining;
308 for (i = 0, value_length = -1; i < nscan; i++)
309 if (pscan[i] == '\0') {
313 if (value_length < 0) /* '\0' was not included in the length */
314 value_length = nscan;
/* +1 leaves room for the terminator appended below */
315 value = kmalloc(value_length + 1, GFP_KERNEL);
318 if (value_length > 0)
319 memcpy(value, pscan, value_length);
320 ((u8 *)(value))[value_length] = '\0';
/*
 * parser_name_get() - position the parser at the name region described
 * by the parameter header (bounds-checked against param_bytes), then
 * return it as a freshly allocated string via parser_string_get().
 * NOTE(review): the error return taken when the bounds check fails is
 * not visible in this excerpt.
 */
325 parser_name_get(struct parser_context *ctx)
327 struct spar_controlvm_parameters_header *phdr = NULL;
329 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
331 if (phdr->name_offset + phdr->name_length > ctx->param_bytes)
334 ctx->curr = ctx->data + phdr->name_offset;
335 ctx->bytes_remaining = phdr->name_length;
336 return parser_string_get(ctx);
/* (bus_no, dev_no) pair used as the key for bus_find_device() lookups;
 * NOTE(review): the member declarations are not visible in this excerpt */
339 struct visor_busdev {
/*
 * match_visorbus_dev_by_id() - bus_find_device() predicate: true when
 * @dev's chipset bus/dev numbers equal the visor_busdev key in @data.
 * NOTE(review): the return statements (nonzero on match, 0 otherwise)
 * are not visible in this excerpt.
 */
344 static int match_visorbus_dev_by_id(struct device *dev, void *data)
346 struct visor_device *vdev = to_visor_device(dev);
347 struct visor_busdev *id = data;
348 u32 bus_no = id->bus_no;
349 u32 dev_no = id->dev_no;
351 if ((vdev->chipset_bus_no == bus_no) &&
352 (vdev->chipset_dev_no == dev_no))
/*
 * visorbus_get_device_by_id() - look up the visor_device with the given
 * bus/dev numbers on visorbus_type, starting the search after @from
 * (or from the beginning when @from is NULL).  Returns NULL when no
 * match is found.
 */
358 struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
359 struct visor_device *from)
362 struct device *dev_start = NULL;
363 struct visor_device *vdev = NULL;
364 struct visor_busdev id = {
370 dev_start = &from->device;
371 dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
372 match_visorbus_dev_by_id);
374 vdev = to_visor_device(dev);
/*
 * controlvm_init_response() - initialize an outgoing controlvm response
 * message: copy the request header into @msg, clear the payload fields,
 * and — when @response is an error code — set the failed flag and store
 * the (negated, i.e. positive) completion status.
 */
379 controlvm_init_response(struct controlvm_message *msg,
380 struct controlvm_message_header *msg_hdr, int response)
382 memset(msg, 0, sizeof(struct controlvm_message));
383 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
384 msg->hdr.payload_bytes = 0;
385 msg->hdr.payload_vm_offset = 0;
386 msg->hdr.payload_max_bytes = 0;
388 msg->hdr.flags.failed = 1;
/* response is a negative errno/CONTROLVM code; store it as positive */
389 msg->hdr.completion_status = (u32)(-response);
/*
 * controlvm_respond_chipset_init() - build and enqueue the response to a
 * CHIPSET_INIT request, advertising the feature bits we support.
 */
394 controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
396 enum ultra_chipset_feature features)
398 struct controlvm_message outmsg;
400 controlvm_init_response(&outmsg, msg_hdr, response)
401 outmsg.cmd.init_chipset.features = features;
402 return visorchannel_signalinsert(chipset_dev->controlvm_channel,
403 CONTROLVM_QUEUE_REQUEST, &outmsg);
/*
 * chipset_init() - handle a CHIPSET_INIT controlvm message.  Guards
 * against double initialization via the function-static chipset_inited
 * flag, negotiates feature bits with Command (parahotplug + the REPLY
 * acknowledgement bit), and sends a response when one is expected.
 */
407 chipset_init(struct controlvm_message *inmsg)
409 static int chipset_inited;
410 enum ultra_chipset_feature features = 0;
411 int rc = CONTROLVM_RESP_SUCCESS;
414 if (chipset_inited) {
415 rc = -CONTROLVM_RESP_ALREADY_DONE;
422 * Set features to indicate we support parahotplug (if Command
425 features = inmsg->cmd.init_chipset.features &
426 ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
429 * Set the "reply" bit so Command knows this is a
430 * features-aware driver.
432 features |= ULTRA_CHIPSET_FEATURE_REPLY;
435 if (inmsg->hdr.flags.response_expected)
436 res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
/*
 * controlvm_respond() - generic response path: build a response from
 * @msg_hdr/@response and enqueue it on the controlvm request queue.
 * When @state is non-NULL the response carries a device_change_state
 * payload with the physical-device flag set.  Test messages are dropped
 * (the early-return path is not visible in this excerpt).
 */
442 controlvm_respond(struct controlvm_message_header *msg_hdr, int response,
443 struct spar_segment_state *state)
445 struct controlvm_message outmsg;
447 controlvm_init_response(&outmsg, msg_hdr, response);
448 if (outmsg.hdr.flags.test_message == 1)
452 outmsg.cmd.device_change_state.state = *state;
453 outmsg.cmd.device_change_state.flags.phys_device = 1;
456 return visorchannel_signalinsert(chipset_dev->controlvm_channel,
457 CONTROLVM_QUEUE_REQUEST, &outmsg);
/* distinguishes which saved crash message slot to write: bus vs device;
 * NOTE(review): the enumerators are not visible in this excerpt */
460 enum crash_obj_type {
/*
 * save_crash_message() - stash @msg into the controlvm channel's saved
 * crash-message area so it can be replayed after a kdump boot.  Reads
 * the saved message count (which must equal CONTROLVM_CRASHMSG_MAX) and
 * the area's offset, then writes the message into the device or bus
 * slot depending on @typ.  The device slot sits one controlvm_message
 * past the base offset.
 */
466 save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
468 u32 local_crash_msg_offset;
469 u16 local_crash_msg_count;
472 err = visorchannel_read(chipset_dev->controlvm_channel,
473 offsetof(struct spar_controlvm_channel_protocol,
474 saved_crash_message_count),
475 &local_crash_msg_count, sizeof(u16));
477 dev_err(&chipset_dev->acpi_device->dev,
478 "failed to read message count\n");
482 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
483 dev_err(&chipset_dev->acpi_device->dev,
484 "invalid number of messages\n");
488 err = visorchannel_read(chipset_dev->controlvm_channel,
489 offsetof(struct spar_controlvm_channel_protocol,
490 saved_crash_message_offset),
491 &local_crash_msg_offset, sizeof(u32));
493 dev_err(&chipset_dev->acpi_device->dev,
494 "failed to read offset\n");
/* device message lives in the second slot of the crash area */
500 local_crash_msg_offset += sizeof(struct controlvm_message);
501 err = visorchannel_write(chipset_dev->controlvm_channel,
502 local_crash_msg_offset,
504 sizeof(struct controlvm_message));
506 dev_err(&chipset_dev->acpi_device->dev,
507 "failed to write dev msg\n");
512 err = visorchannel_write(chipset_dev->controlvm_channel,
513 local_crash_msg_offset,
515 sizeof(struct controlvm_message));
517 dev_err(&chipset_dev->acpi_device->dev,
518 "failed to write bus msg\n");
523 dev_err(&chipset_dev->acpi_device->dev,
524 "Invalid crash_obj_type\n");
/*
 * controlvm_responder() - respond to a previously stashed request
 * header.  No-ops when there is no pending header, and sanity-checks
 * that the pending header's id matches the command being answered.
 */
531 controlvm_responder(enum controlvm_id cmd_id,
532 struct controlvm_message_header *pending_msg_hdr,
535 if (!pending_msg_hdr)
538 if (pending_msg_hdr->id != (u32)cmd_id)
541 return controlvm_respond(pending_msg_hdr, response, NULL);
/*
 * device_changestate_responder() - respond to a pending
 * DEVICE_CHANGESTATE request for device @p, echoing its bus/dev numbers
 * and reporting @response_state as the resulting segment state.
 */
545 device_changestate_responder(enum controlvm_id cmd_id,
546 struct visor_device *p, int response,
547 struct spar_segment_state response_state)
549 struct controlvm_message outmsg;
550 u32 bus_no = p->chipset_bus_no;
551 u32 dev_no = p->chipset_dev_no;
553 if (!p->pending_msg_hdr)
555 if (p->pending_msg_hdr->id != cmd_id)
558 controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
560 outmsg.cmd.device_change_state.bus_no = bus_no;
561 outmsg.cmd.device_change_state.dev_no = dev_no;
562 outmsg.cmd.device_change_state.state = response_state;
564 return visorchannel_signalinsert(chipset_dev->controlvm_channel,
565 CONTROLVM_QUEUE_REQUEST, &outmsg);
/*
 * bus_create() - handle a BUS_CREATE controlvm message: allocate and
 * initialize a visor_device for the new bus, create its visorchannel,
 * stash the request header when a response is expected, and hand the
 * bus to chipset_bus_create() (which sends the success response).  On
 * any failure the error-unwind labels free the partial state and, if
 * expected, a failure response is sent here instead.
 */
569 bus_create(struct controlvm_message *inmsg)
571 struct controlvm_message_packet *cmd = &inmsg->cmd;
572 struct controlvm_message_header *pmsg_hdr = NULL;
573 u32 bus_no = cmd->create_bus.bus_no;
574 struct visor_device *bus_info;
575 struct visorchannel *visorchannel;
/* reject a duplicate create for an already-created bus */
578 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
579 if (bus_info && (bus_info->state.created == 1)) {
580 POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
586 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
588 POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
594 INIT_LIST_HEAD(&bus_info->list_all);
595 bus_info->chipset_bus_no = bus_no;
596 bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
598 POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
/* the SIOVM bus hosts the crash-dump devices; preserve its message */
600 if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) {
601 err = save_crash_message(inmsg, CRASH_BUS);
603 goto err_free_bus_info;
606 if (inmsg->hdr.flags.response_expected == 1) {
607 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr),
610 POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
611 bus_info->chipset_bus_no,
614 goto err_free_bus_info;
617 memcpy(pmsg_hdr, &inmsg->hdr,
618 sizeof(struct controlvm_message_header));
619 bus_info->pending_msg_hdr = pmsg_hdr;
622 visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
623 cmd->create_bus.channel_bytes,
625 cmd->create_bus.bus_data_type_uuid);
628 POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
631 goto err_free_pending_msg;
633 bus_info->visorchannel = visorchannel;
635 /* Response will be handled by chipset_bus_create */
636 err = chipset_bus_create(bus_info);
637 /* If error chipset_bus_create didn't respond, need to respond here */
639 goto err_destroy_channel;
641 POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
645 visorchannel_destroy(visorchannel);
647 err_free_pending_msg:
648 kfree(bus_info->pending_msg_hdr);
654 if (inmsg->hdr.flags.response_expected == 1)
655 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * bus_destroy() - handle a BUS_DESTROY controlvm message.  Validates
 * that the bus exists, was created, and has no response outstanding;
 * stashes the request header when a response is expected, then defers
 * to chipset_bus_destroy() which sends the response.  Failure paths
 * respond directly at the bottom.
 */
660 bus_destroy(struct controlvm_message *inmsg)
662 struct controlvm_message_packet *cmd = &inmsg->cmd;
663 struct controlvm_message_header *pmsg_hdr = NULL;
664 u32 bus_no = cmd->destroy_bus.bus_no;
665 struct visor_device *bus_info;
668 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
673 if (bus_info->state.created == 0) {
677 if (bus_info->pending_msg_hdr) {
678 /* only non-NULL if dev is still waiting on a response */
682 if (inmsg->hdr.flags.response_expected == 1) {
683 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
689 memcpy(pmsg_hdr, &inmsg->hdr,
690 sizeof(struct controlvm_message_header));
691 bus_info->pending_msg_hdr = pmsg_hdr;
694 /* Response will be handled by chipset_bus_destroy */
695 chipset_bus_destroy(bus_info);
699 if (inmsg->hdr.flags.response_expected == 1)
700 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * bus_configure() - handle a BUS_CONFIGURE controlvm message: attach
 * the guest partition handle to the bus's visorchannel and record the
 * partition uuid/name parsed from the message payload.  Responds
 * immediately on both the success and failure paths (this command is
 * handled synchronously, unlike create/destroy).
 */
705 bus_configure(struct controlvm_message *inmsg,
706 struct parser_context *parser_ctx)
708 struct controlvm_message_packet *cmd = &inmsg->cmd;
710 struct visor_device *bus_info;
713 bus_no = cmd->configure_bus.bus_no;
714 POSTCODE_LINUX(BUS_CONFIGURE_ENTRY_PC, 0, bus_no,
715 DIAG_SEVERITY_PRINT);
717 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
719 POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
723 } else if (bus_info->state.created == 0) {
724 POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
728 } else if (bus_info->pending_msg_hdr) {
729 POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
735 err = visorchannel_set_clientpartition
736 (bus_info->visorchannel,
737 cmd->configure_bus.guest_handle);
/* NOTE(review): parser_ctx appears to be used unconditionally here; the
 * NULL guard, if any, is not visible in this excerpt */
742 bus_info->partition_uuid = parser_id_get(parser_ctx);
743 bus_info->name = parser_name_get(parser_ctx);
746 POSTCODE_LINUX(BUS_CONFIGURE_EXIT_PC, 0, bus_no,
747 DIAG_SEVERITY_PRINT);
749 if (inmsg->hdr.flags.response_expected == 1)
750 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
754 if (inmsg->hdr.flags.response_expected == 1)
755 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * my_device_create() - handle a DEVICE_CREATE controlvm message:
 * validate the owning bus, reject duplicate creates, allocate the
 * visor_device, create its (spinlock-protected) visorchannel, save the
 * crash message for vHBA devices, stash the request header when a
 * response is expected, and hand off to chipset_device_create() which
 * sends the success response.  Errors unwind via the labels below and
 * respond here instead.
 */
760 my_device_create(struct controlvm_message *inmsg)
762 struct controlvm_message_packet *cmd = &inmsg->cmd;
763 struct controlvm_message_header *pmsg_hdr = NULL;
764 u32 bus_no = cmd->create_device.bus_no;
765 u32 dev_no = cmd->create_device.dev_no;
766 struct visor_device *dev_info = NULL;
767 struct visor_device *bus_info;
768 struct visorchannel *visorchannel;
771 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
773 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
779 if (bus_info->state.created == 0) {
780 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
786 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
787 if (dev_info && (dev_info->state.created == 1)) {
788 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
794 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
796 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
802 dev_info->chipset_bus_no = bus_no;
803 dev_info->chipset_dev_no = dev_no;
804 dev_info->inst = cmd->create_device.dev_inst_uuid;
806 /* not sure where the best place to set the 'parent' */
807 dev_info->device.parent = &bus_info->device;
809 POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
810 DIAG_SEVERITY_PRINT);
813 visorchannel_create_with_lock(cmd->create_device.channel_addr,
814 cmd->create_device.channel_bytes,
816 cmd->create_device.data_type_uuid);
819 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
822 goto err_free_dev_info;
824 dev_info->visorchannel = visorchannel;
825 dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
/* vHBA devices carry the crash-dump storage; preserve the message */
826 if (uuid_le_cmp(cmd->create_device.data_type_uuid,
827 spar_vhba_channel_protocol_uuid) == 0) {
828 err = save_crash_message(inmsg, CRASH_DEV);
830 goto err_destroy_visorchannel;
833 if (inmsg->hdr.flags.response_expected == 1) {
834 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
837 goto err_destroy_visorchannel;
840 memcpy(pmsg_hdr, &inmsg->hdr,
841 sizeof(struct controlvm_message_header));
842 dev_info->pending_msg_hdr = pmsg_hdr;
844 /* Chipset_device_create will send response */
845 err = chipset_device_create(dev_info);
847 goto err_destroy_visorchannel;
849 POSTCODE_LINUX(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
850 DIAG_SEVERITY_PRINT);
853 err_destroy_visorchannel:
854 visorchannel_destroy(visorchannel);
860 if (inmsg->hdr.flags.response_expected == 1)
861 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * my_device_changestate() - handle a DEVICE_CHANGESTATE controlvm
 * message.  After validation, maps the requested segment state to a
 * resume (running) or pause (standby) of the device; the matching
 * chipset_device_resume()/pause() callback sends the response.  The
 * failure path at the bottom responds directly.
 */
866 my_device_changestate(struct controlvm_message *inmsg)
868 struct controlvm_message_packet *cmd = &inmsg->cmd;
869 struct controlvm_message_header *pmsg_hdr = NULL;
870 u32 bus_no = cmd->device_change_state.bus_no;
871 u32 dev_no = cmd->device_change_state.dev_no;
872 struct spar_segment_state state = cmd->device_change_state.state;
873 struct visor_device *dev_info;
876 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
878 POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
883 if (dev_info->state.created == 0) {
884 POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
889 if (dev_info->pending_msg_hdr) {
890 /* only non-NULL if dev is still waiting on a response */
894 if (inmsg->hdr.flags.response_expected == 1) {
895 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
901 memcpy(pmsg_hdr, &inmsg->hdr,
902 sizeof(struct controlvm_message_header));
903 dev_info->pending_msg_hdr = pmsg_hdr;
906 if (state.alive == segment_state_running.alive &&
907 state.operating == segment_state_running.operating)
908 /* Response will be sent from chipset_device_resume */
909 err = chipset_device_resume(dev_info);
910 /* ServerNotReady / ServerLost / SegmentStateStandby */
911 else if (state.alive == segment_state_standby.alive &&
912 state.operating == segment_state_standby.operating)
914 * technically this is standby case where server is lost.
915 * Response will be sent from chipset_device_pause.
917 err = chipset_device_pause(dev_info);
924 if (inmsg->hdr.flags.response_expected == 1)
925 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * my_device_destroy() - handle a DEVICE_DESTROY controlvm message.
 * Validates the device exists, was created, and has no outstanding
 * response; stashes the request header when a response is expected,
 * then defers to chipset_device_destroy() which sends the response.
 * Failure paths respond directly at the bottom.
 */
930 my_device_destroy(struct controlvm_message *inmsg)
932 struct controlvm_message_packet *cmd = &inmsg->cmd;
933 struct controlvm_message_header *pmsg_hdr = NULL;
934 u32 bus_no = cmd->destroy_device.bus_no;
935 u32 dev_no = cmd->destroy_device.dev_no;
936 struct visor_device *dev_info;
939 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
944 if (dev_info->state.created == 0) {
949 if (dev_info->pending_msg_hdr) {
950 /* only non-NULL if dev is still waiting on a response */
954 if (inmsg->hdr.flags.response_expected == 1) {
955 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
961 memcpy(pmsg_hdr, &inmsg->hdr,
962 sizeof(struct controlvm_message_header));
963 dev_info->pending_msg_hdr = pmsg_hdr;
966 chipset_device_destroy(dev_info);
970 if (inmsg->hdr.flags.response_expected == 1)
971 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
976 * The general parahotplug flow works as follows. The visorchipset receives
977 * a DEVICE_CHANGESTATE message from Command specifying a physical device
978 * to enable or disable. The CONTROLVM message handler calls
979 * parahotplug_process_message, which then adds the message to a global list
980 * and kicks off a udev event which causes a user level script to enable or
981 * disable the specified device. The udev script then writes to
982 * /sys/devices/platform/visorchipset/parahotplug, which causes the
983 * parahotplug store functions to get called, at which point the
984 * appropriate CONTROLVM message is retrieved from the list and responded
/* how long a queued parahotplug request may wait before it expires */
988 #define PARAHOTPLUG_TIMEOUT_MS 2000
991 * parahotplug_next_id() - generate unique int to match an outstanding
992 * CONTROLVM message with a udev script /sys
995 * Return: a unique integer value
998 parahotplug_next_id(void)
1000 static atomic_t id = ATOMIC_INIT(0);
/* atomic so concurrent callers never receive the same id */
1002 return atomic_inc_return(&id);
1006 * parahotplug_next_expiration() - returns the time (in jiffies) when a
1007 * CONTROLVM message on the list should expire
1008 * -- PARAHOTPLUG_TIMEOUT_MS in the future
1010 * Return: expected expiration time (in jiffies)
1012 static unsigned long
1013 parahotplug_next_expiration(void)
1015 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1019 * parahotplug_request_create() - create a parahotplug_request, which is
1020 * basically a wrapper for a CONTROLVM_MESSAGE
1021 * that we can stick on a list
1022 * @msg: the message to insert in the request
1024 * Return: the request containing the provided message
1026 static struct parahotplug_request *
1027 parahotplug_request_create(struct controlvm_message *msg)
1029 struct parahotplug_request *req;
1031 req = kmalloc(sizeof(*req), GFP_KERNEL);
/* NOTE(review): the NULL-check return and the copy of *msg into
 * req->msg are not visible in this excerpt */
1035 req->id = parahotplug_next_id();
1036 req->expiration = parahotplug_next_expiration();
1043 * parahotplug_request_destroy() - free a parahotplug_request
1044 * @req: the request to deallocate
1047 parahotplug_request_destroy(struct parahotplug_request *req)
/* outstanding disable requests awaiting completion via sysfs */
1052 static LIST_HEAD(parahotplug_request_list);
1053 static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
1056 * parahotplug_request_complete() - mark request as complete
1057 * @id: the id of the request
1058 * @active: indicates whether the request is assigned to active partition
1060 * Called from the /sys handler, which means the user script has
1061 * finished the enable/disable. Find the matching identifier, and
1062 * respond to the CONTROLVM message with success.
1064 * Return: 0 on success or -EINVAL on failure
1067 parahotplug_request_complete(int id, u16 active)
1069 struct list_head *pos;
1070 struct list_head *tmp;
1072 spin_lock(&parahotplug_request_list_lock);
1074 /* Look for a request matching "id". */
1075 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1076 struct parahotplug_request *req =
1077 list_entry(pos, struct parahotplug_request, list);
1078 if (req->id == id) {
1080 * Found a match. Remove it from the list and
/* the lock is dropped before responding; req is already unlinked so no
 * other path can reach it (list_del not visible in this excerpt) */
1084 spin_unlock(&parahotplug_request_list_lock);
1085 req->msg.cmd.device_change_state.state.active = active;
1086 if (req->msg.hdr.flags.response_expected)
1088 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1089 &req->msg.cmd.device_change_state.state);
1090 parahotplug_request_destroy(req);
/* no matching id found */
1095 spin_unlock(&parahotplug_request_list_lock);
1100 * devicedisabled_store() - disables the hotplug device
1101 * @dev: sysfs interface variable not utilized in this function
1102 * @attr: sysfs interface variable not utilized in this function
1103 * @buf: buffer containing the device id
1104 * @count: the size of the buffer
1106 * The parahotplug/devicedisabled interface gets called by our support script
1107 * when an SR-IOV device has been shut down. The ID is passed to the script
1108 * and then passed back when the device has been removed.
1110 * Return: the size of the buffer for success or negative for error
1112 static ssize_t devicedisabled_store(struct device *dev,
1113 struct device_attribute *attr,
1114 const char *buf, size_t count)
1119 if (kstrtouint(buf, 10, &id))
/* active=0: the script reports the device is now disabled */
1122 err = parahotplug_request_complete(id, 0);
1127 static DEVICE_ATTR_WO(devicedisabled);
1130 * deviceenabled_store() - enables the hotplug device
1131 * @dev: sysfs interface variable not utilized in this function
1132 * @attr: sysfs interface variable not utilized in this function
1133 * @buf: buffer containing the device id
1134 * @count: the size of the buffer
1136 * The parahotplug/deviceenabled interface gets called by our support script
1137 * when an SR-IOV device has been recovered. The ID is passed to the script
1138 * and then passed back when the device has been brought back up.
1140 * Return: the size of the buffer for success or negative for error
1142 static ssize_t deviceenabled_store(struct device *dev,
1143 struct device_attribute *attr,
1144 const char *buf, size_t count)
1148 if (kstrtouint(buf, 10, &id))
/* active=1: the script reports the device is back up; result ignored
 * because enable requests were already answered at kickoff time */
1151 parahotplug_request_complete(id, 1);
1154 static DEVICE_ATTR_WO(deviceenabled);
/* top-level sysfs attributes: installation progress and tool control */
1156 static struct attribute *visorchipset_install_attrs[] = {
1157 &dev_attr_toolaction.attr,
1158 &dev_attr_boottotool.attr,
1159 &dev_attr_error.attr,
1160 &dev_attr_textid.attr,
1161 &dev_attr_remaining_steps.attr,
1165 static const struct attribute_group visorchipset_install_group = {
1167 .attrs = visorchipset_install_attrs
/* "parahotplug" subdirectory: completion hooks for the udev script */
1170 static struct attribute *visorchipset_parahotplug_attrs[] = {
1171 &dev_attr_devicedisabled.attr,
1172 &dev_attr_deviceenabled.attr,
1176 static struct attribute_group visorchipset_parahotplug_group = {
1177 .name = "parahotplug",
1178 .attrs = visorchipset_parahotplug_attrs
1181 static const struct attribute_group *visorchipset_dev_groups[] = {
1182 &visorchipset_install_group,
1183 &visorchipset_parahotplug_group,
1188 * parahotplug_request_kickoff() - initiate parahotplug request
1189 * @req: the request to initiate
1191 * Cause uevent to run the user level script to do the disable/enable specified
1192 * in the parahotplug_request.
1195 parahotplug_request_kickoff(struct parahotplug_request *req)
1197 struct controlvm_message_packet *cmd = &req->msg.cmd;
1198 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1201 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1204 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1205 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1206 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1207 cmd->device_change_state.state.active);
1208 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1209 cmd->device_change_state.bus_no);
/* dev_no encodes PCI device (bits 3+) and function (low 3 bits) */
1210 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1211 cmd->device_change_state.dev_no >> 3);
1212 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1213 cmd->device_change_state.dev_no & 0x7);
1215 return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1220 * parahotplug_process_message() - enables or disables a PCI device by kicking
1222 * @inmsg: the message indicating whether to enable or disable
1225 parahotplug_process_message(struct controlvm_message *inmsg)
1227 struct parahotplug_request *req;
1230 req = parahotplug_request_create(inmsg);
1236 * For enable messages, just respond with success right away, we don't
1237 * need to wait to see if the enable was successful.
1239 if (inmsg->cmd.device_change_state.state.active) {
1240 err = parahotplug_request_kickoff(req);
1243 controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS,
1244 &inmsg->cmd.device_change_state.state);
1245 parahotplug_request_destroy(req);
1250 * For disable messages, add the request to the
1251 * request list before kicking off the udev script. It
1252 * won't get responded to until the script has
1253 * indicated it's done.
1255 spin_lock(&parahotplug_request_list_lock);
1256 list_add_tail(&req->list, &parahotplug_request_list);
1257 spin_unlock(&parahotplug_request_list_lock);
1259 err = parahotplug_request_kickoff(req);
/* kickoff failed: answer with the error so Command is not left waiting */
1265 controlvm_respond(&inmsg->hdr, err,
1266 &inmsg->cmd.device_change_state.state);
1271 * chipset_ready_uevent() - sends chipset_ready action
1273 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1275 * Return: 0 on success, negative on failure
1278 chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
1282 res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
1285 if (msg_hdr->flags.response_expected)
1286 controlvm_respond(msg_hdr, res, NULL);
1292 * chipset_selftest_uevent() - sends chipset_selftest action
1294 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1296 * Return: 0 on success, negative on failure
1299 chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
1301 char env_selftest[20];
1302 char *envp[] = { env_selftest, NULL };
1305 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1306 res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1309 if (msg_hdr->flags.response_expected)
1310 controlvm_respond(msg_hdr, res, NULL);
1316 * chipset_notready_uevent() - sends chipset_notready action
1318 * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1320 * Return: 0 on success, negative on failure
1323 chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
1327 res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
1329 if (msg_hdr->flags.response_expected)
1330 controlvm_respond(msg_hdr, res, NULL);
/*
 * unisys_vmcall() - issue a VMCALL to the s-Par hypervisor.  @param is
 * split across ebx (low 32 bits) and ecx (high 32 bits); the function
 * first checks the CPUID hypervisor-present bit (ECX bit 31 of leaf 1)
 * before executing the raw VMCALL opcode (0f 01 c1).  VMCALL result
 * codes are translated to Linux errnos in the error block below.
 */
1335 static int unisys_vmcall(unsigned long tuple, unsigned long param)
1338 unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
1339 unsigned long reg_ebx;
1340 unsigned long reg_ecx;
1342 reg_ebx = param & 0xFFFFFFFF;
1343 reg_ecx = param >> 32;
1345 cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
1346 if (!(cpuid_ecx & 0x80000000))
/* raw VMCALL instruction encoding; result comes back in eax */
1349 __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
1350 "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
1357 error: /* Need to convert from VMCALL error codes to Linux */
1359 case VMCALL_RESULT_INVALID_PARAM:
1361 case VMCALL_RESULT_DATA_UNAVAILABLE:
/*
 * issue_vmcall_io_controlvm_addr() - ask the hypervisor for the physical
 * address and size of the controlvm channel.  A params structure is
 * passed by physical address; on success its fields are copied out to
 * @control_addr and @control_bytes.
 * NOTE(review): params is a stack variable handed to the hypervisor by
 * physical address — virt_to_phys() on stack memory is only valid for
 * directly-mapped kernel stacks; confirm against the full file.
 */
1368 issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
1370 struct vmcall_io_controlvm_addr_params params;
1374 physaddr = virt_to_phys(&params);
1375 err = unisys_vmcall(VMCALL_CONTROLVM_ADDR, physaddr);
1379 *control_addr = params.address;
1380 *control_bytes = params.channel_bytes;
/*
 * controlvm_get_channel_address() - return the controlvm channel's
 * physical address obtained via VMCALL, or 0 on failure (the returns
 * are not visible in this excerpt).
 */
1385 static u64 controlvm_get_channel_address(void)
1390 if (issue_vmcall_io_controlvm_addr(&addr, &size))
/*
 * setup_crash_devices_work_queue() - work handler run when booting a
 * kdump (crash) kernel.  Re-initializes the chipset, then reads back
 * the bus-create and device-create messages that were saved into the
 * controlvm channel's crash area (see save_crash_message()) and replays
 * them so the crash-dump storage device comes back up.
 */
1397 setup_crash_devices_work_queue(struct work_struct *work)
1399 struct controlvm_message local_crash_bus_msg;
1400 struct controlvm_message local_crash_dev_msg;
1401 struct controlvm_message msg;
1402 u32 local_crash_msg_offset;
1403 u16 local_crash_msg_count;
1405 POSTCODE_LINUX(CRASH_DEV_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
1407 /* send init chipset msg */
1408 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
/* NOTE(review): bus_count = 23 is a magic value; its origin is not
 * visible in this excerpt */
1409 msg.cmd.init_chipset.bus_count = 23;
1410 msg.cmd.init_chipset.switch_count = 0;
1414 /* get saved message count */
1415 if (visorchannel_read(chipset_dev->controlvm_channel,
1416 offsetof(struct spar_controlvm_channel_protocol,
1417 saved_crash_message_count),
1418 &local_crash_msg_count, sizeof(u16)) < 0) {
1419 POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
1424 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
1425 POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
1426 local_crash_msg_count,
1431 /* get saved crash message offset */
1432 if (visorchannel_read(chipset_dev->controlvm_channel,
1433 offsetof(struct spar_controlvm_channel_protocol,
1434 saved_crash_message_offset),
1435 &local_crash_msg_offset, sizeof(u32)) < 0) {
1436 POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
1441 /* read create device message for storage bus offset */
1442 if (visorchannel_read(chipset_dev->controlvm_channel,
1443 local_crash_msg_offset,
1444 &local_crash_bus_msg,
1445 sizeof(struct controlvm_message)) < 0) {
1446 POSTCODE_LINUX(CRASH_DEV_RD_BUS_FAILURE_PC, 0, 0,
1451 /* read create device message for storage device */
1452 if (visorchannel_read(chipset_dev->controlvm_channel,
1453 local_crash_msg_offset +
1454 sizeof(struct controlvm_message),
1455 &local_crash_dev_msg,
1456 sizeof(struct controlvm_message)) < 0) {
1457 POSTCODE_LINUX(CRASH_DEV_RD_DEV_FAILURE_PC, 0, 0,
1462 /* reuse IOVM create bus message */
1463 if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
1464 bus_create(&local_crash_bus_msg);
1466 POSTCODE_LINUX(CRASH_DEV_BUS_NULL_FAILURE_PC, 0, 0,
1471 /* reuse create device message for storage device */
1472 if (local_crash_dev_msg.cmd.create_device.channel_addr) {
1473 my_device_create(&local_crash_dev_msg);
1475 POSTCODE_LINUX(CRASH_DEV_DEV_NULL_FAILURE_PC, 0, 0,
1479 POSTCODE_LINUX(CRASH_DEV_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
/*
 * Completion callback for a CONTROLVM_BUS_CREATE request: mark the bus as
 * created, send the saved response header back, then release it.
 */
bus_create_response(struct visor_device *bus_info, int response)
	/* record that the bus now exists */
	bus_info->state.created = 1;

	controlvm_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,

	/* the saved request header has been answered; drop it */
	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
/*
 * Completion callback for a CONTROLVM_BUS_DESTROY request: respond with the
 * saved header, then release it.
 */
bus_destroy_response(struct visor_device *bus_info, int response)
	controlvm_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,

	/* header answered; free and clear so it is not reused */
	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
/*
 * Completion callback for a CONTROLVM_DEVICE_CREATE request: mark the device
 * created, respond with the saved header, then release it.
 */
device_create_response(struct visor_device *dev_info, int response)
	/* record that the device now exists */
	dev_info->state.created = 1;

	controlvm_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,

	/* header answered; free and clear so it is not reused */
	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
/*
 * Completion callback for a CONTROLVM_DEVICE_DESTROY request: respond with
 * the saved header, then release it.
 */
device_destroy_response(struct visor_device *dev_info, int response)
	controlvm_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,

	/* header answered; free and clear so it is not reused */
	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
/*
 * Completion callback for a device pause: report a CHANGESTATE response
 * with the device now in the "standby" segment state, then drop the saved
 * request header.
 */
device_pause_response(struct visor_device *dev_info,
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     /* device is now quiesced */
				     segment_state_standby);

	/* header answered; free and clear so it is not reused */
	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
/*
 * Completion callback for a device resume: report a CHANGESTATE response
 * with the device back in the "running" segment state, then drop the saved
 * request header.
 */
device_resume_response(struct visor_device *dev_info, int response)
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     /* device is running again */
				     segment_state_running);

	/* header answered; free and clear so it is not reused */
	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
/*
 * Build a parser_context holding a private copy of a controlvm message
 * payload located at guest-physical address @addr.
 * @addr:  physical address of the payload
 * @bytes: payload length in bytes
 * @local: true when the payload lies in OS-controlled memory (test
 *         messages), so __va() can be used instead of memremap()
 * @retry: set true when allocation was throttled and the caller should
 *         retry later (visible setting lines are missing from extract)
 */
static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
	int allocbytes = sizeof(struct parser_context) + bytes;
	struct parser_context *ctx;

	/*
	 * alloc one extra byte to ensure the payload is
	 * '\0'-terminated (original comment said "an 0 extra byte")
	 */
	/* throttle: refuse to buffer more than the global payload cap */
	if ((chipset_dev->controlvm_payload_bytes_buffered + bytes)
	    > MAX_CONTROLVM_PAYLOAD_BYTES) {
	ctx = kzalloc(allocbytes, GFP_KERNEL);

	ctx->allocbytes = allocbytes;
	ctx->param_bytes = bytes;
	ctx->bytes_remaining = 0;
	/* becomes true only once the payload copy has succeeded */
	ctx->byte_stream = false;
	/* local (test) payloads must fall within directly-mapped memory */
	if (addr > virt_to_phys(high_memory - 1))
		goto err_finish_ctx;
	p = __va((unsigned long)(addr));
	memcpy(ctx->data, p, bytes);
	/* non-local payloads must be mapped before they can be copied */
	void *mapping = memremap(addr, bytes, MEMREMAP_WB);
		goto err_finish_ctx;
	memcpy(ctx->data, mapping, bytes);

	ctx->byte_stream = true;
	/* account for the buffered copy against the global cap */
	chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
 * handle_command() - process a controlvm message
 * @inmsg:        the message to process
 * @channel_addr: address of the controlvm channel
 *
 * Return:
 *	0 - Successfully processed the message
 *	-EAGAIN - ControlVM message was not processed and should be retried
 *		  reading the next controlvm message; a scenario where this can
 *		  occur is when we need to throttle the allocation of memory in
 *		  which to copy out controlvm payload data.
 *	< 0 - error: ControlVM message was processed but an error occurred.
 */
handle_command(struct controlvm_message inmsg, u64 channel_addr)
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	struct parser_context *parser_ctx = NULL;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
	/* payload lives at an offset from the channel's base address */
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/*
	 * Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory. We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr && parm_bytes) {
		parser_init_byte_stream(parm_addr, parm_bytes,
					local_addr, &retry);
		/* NULL ctx with retry set means throttled: caller retries */
		if (!parser_ctx && retry)

	/* ack receipt of the message before dispatching on it */
	controlvm_init_response(&ackmsg, &inmsg.hdr,
				CONTROLVM_RESP_SUCCESS);
	err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
					CONTROLVM_QUEUE_ACK,

	/* dispatch on message id; break statements are missing from extract */
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		err = chipset_init(&inmsg);
	case CONTROLVM_BUS_CREATE:
		err = bus_create(&inmsg);
	case CONTROLVM_BUS_DESTROY:
		err = bus_destroy(&inmsg);
	case CONTROLVM_BUS_CONFIGURE:
		err = bus_configure(&inmsg, parser_ctx);
	case CONTROLVM_DEVICE_CREATE:
		err = my_device_create(&inmsg);
	case CONTROLVM_DEVICE_CHANGESTATE:
		/* physical devices are handled via the parahotplug path */
		if (cmd->device_change_state.flags.phys_device) {
			err = parahotplug_process_message(&inmsg);
		/*
		 * save the hdr and cmd structures for later use
		 * when sending back the response to Command
		 */
			err = my_device_changestate(&inmsg);
	case CONTROLVM_DEVICE_DESTROY:
		err = my_device_destroy(&inmsg);
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no-op; just send a response saying we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS,
	case CONTROLVM_CHIPSET_READY:
		err = chipset_ready_uevent(&inmsg.hdr);
	case CONTROLVM_CHIPSET_SELFTEST:
		err = chipset_selftest_uevent(&inmsg.hdr);
	case CONTROLVM_CHIPSET_STOP:
		err = chipset_notready_uevent(&inmsg.hdr);
	/* unknown message id: reply with an error if a response is wanted */
	if (inmsg.hdr.flags.response_expected)
		controlvm_respond(&inmsg.hdr,
				  -CONTROLVM_RESP_ID_UNKNOWN, NULL);
	/* release the payload copy buffered by parser_init_byte_stream() */
	parser_done(parser_ctx);
 * read_controlvm_event() - retrieves the next message from the
 *                          CONTROLVM_QUEUE_EVENT queue in the controlvm
 *                          channel
 * @msg: pointer to the retrieved message
 *
 * Return: 0 if valid message was retrieved or -error
 */
read_controlvm_event(struct controlvm_message *msg)
	err = visorchannel_signalremove(chipset_dev->controlvm_channel,
					CONTROLVM_QUEUE_EVENT, msg);
	/* test messages are not valid on the event queue */
	if (msg->hdr.flags.test_message == 1)
1749 * parahotplug_process_list() - remove any request from the list that's been on
1750 * there too long and respond with an error
1753 parahotplug_process_list(void)
1755 struct list_head *pos;
1756 struct list_head *tmp;
1758 spin_lock(¶hotplug_request_list_lock);
1760 list_for_each_safe(pos, tmp, ¶hotplug_request_list) {
1761 struct parahotplug_request *req =
1762 list_entry(pos, struct parahotplug_request, list);
1764 if (!time_after_eq(jiffies, req->expiration))
1768 if (req->msg.hdr.flags.response_expected)
1771 CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
1772 &req->msg.cmd.device_change_state.state);
1773 parahotplug_request_destroy(req);
1776 spin_unlock(¶hotplug_request_list_lock);
/*
 * controlvm_periodic_work() - periodic worker that drains and dispatches
 *                             controlvm messages, then reschedules itself
 * @work: the delayed work item that fired
 *
 * NOTE(review): the do/while head, gotos and braces for several branches
 * are missing from this extract; comments describe only the visible code.
 */
controlvm_periodic_work(struct work_struct *work)
	struct controlvm_message inmsg;

	/* Drain the RESPONSE queue to make it empty */
	err = visorchannel_signalremove(chipset_dev->controlvm_channel,
					CONTROLVM_QUEUE_RESPONSE,
	/* stop after an error or once a full batch has been drained */
	} while ((!err) && (++count < CONTROLVM_MESSAGE_MAX));

	if (chipset_dev->controlvm_pending_msg_valid) {
		/*
		 * we throttled processing of a prior
		 * msg, so try to process it again
		 * rather than reading a new one
		 */
		inmsg = chipset_dev->controlvm_pending_msg;
		chipset_dev->controlvm_pending_msg_valid = false;
		err = read_controlvm_event(&inmsg);

	/* any message we saw counts as recent channel activity */
	chipset_dev->most_recent_message_jiffies = jiffies;
	err = handle_command(inmsg,
			     visorchannel_get_physaddr
			     (chipset_dev->controlvm_channel));
	/* -EAGAIN: stash the message and retry it on the next pass */
	if (err == -EAGAIN) {
		chipset_dev->controlvm_pending_msg = inmsg;
		chipset_dev->controlvm_pending_msg_valid = true;
	err = read_controlvm_event(&inmsg);

	/* parahotplug_worker */
	parahotplug_process_list();

	/* idle channel: drop to slow polling; busy: stay fast */
	if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
				(HZ * MIN_IDLE_SECONDS))) {
		/*
		 * it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (chipset_dev->poll_jiffies !=
					POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			chipset_dev->poll_jiffies =
					POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
		if (chipset_dev->poll_jiffies !=
					POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			chipset_dev->poll_jiffies =
					POLLJIFFIES_CONTROLVMCHANNEL_FAST;

	/* re-arm ourselves at the (possibly adjusted) polling interval */
	schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
			      chipset_dev->poll_jiffies);
/*
 * visorchipset_init() - ACPI .add callback: allocate the chipset device,
 *                       attach the controlvm channel, create sysfs groups
 *                       and start the periodic controlvm worker.
 * @acpi_device: the ACPI device being bound
 *
 * NOTE(review): several error-path lines (returns, kfree, label bodies)
 * are missing from this extract; comments describe only the visible code.
 */
visorchipset_init(struct acpi_device *acpi_device)
	uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
	struct visorchannel *controlvm_channel;

	/* ask the hypervisor where the controlvm channel lives */
	addr = controlvm_get_channel_address();

	chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);

	acpi_device->driver_data = chipset_dev;

	chipset_dev->acpi_device = acpi_device;
	chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	controlvm_channel = visorchannel_create_with_lock(addr,
							  0, GFP_KERNEL, uuid);

	if (!controlvm_channel)
		goto error_free_chipset_dev;

	chipset_dev->controlvm_channel = controlvm_channel;

	err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
				  visorchipset_dev_groups);
		goto error_destroy_channel;

	/* verify the channel header is one we can act as a client of */
	if (!SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
				visorchannel_get_header(controlvm_channel)))
		goto error_delete_groups;

	/* if booting in a crash kernel */
	if (is_kdump_kernel())
		INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
				  setup_crash_devices_work_queue);
		INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
				  controlvm_periodic_work);

	chipset_dev->most_recent_message_jiffies = jiffies;
	chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
			      chipset_dev->poll_jiffies);

	POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, 0, DIAG_SEVERITY_PRINT);

	err = visorbus_init();
		goto error_cancel_work;

	/* unwind in reverse order of setup */
	cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);

error_delete_groups:
	sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
			    visorchipset_dev_groups);

error_destroy_channel:
	visorchannel_destroy(chipset_dev->controlvm_channel);

error_free_chipset_dev:
	POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR);
/*
 * visorchipset_exit() - ACPI .remove callback: undo visorchipset_init() —
 * stop the worker, remove sysfs groups and destroy the controlvm channel.
 */
visorchipset_exit(struct acpi_device *acpi_device)
	/* stop the periodic worker before tearing anything down */
	cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
	sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
			    visorchipset_dev_groups);

	visorchannel_destroy(chipset_dev->controlvm_channel);
/* ACPI IDs this driver binds to (entries not visible in this extract) */
static const struct acpi_device_id unisys_device_ids[] = {

/* ACPI driver glue: init/exit are wired to .add/.remove */
static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.add = visorchipset_init,
	.remove = visorchipset_exit,

/* allow userspace tooling (modprobe/udev) to match on the ACPI IDs */
MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
1957 static __init int visorutil_spar_detect(void)
1959 unsigned int eax, ebx, ecx, edx;
1961 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1963 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
1964 return (ebx == UNISYS_SPAR_ID_EBX) &&
1965 (ecx == UNISYS_SPAR_ID_ECX) &&
1966 (edx == UNISYS_SPAR_ID_EDX);
/* Module entry point: register the ACPI driver only on s-Par hardware. */
static int init_unisys(void)
	/* bail out early when not running under the s-Par hypervisor */
	if (!visorutil_spar_detect())

	result = acpi_bus_register_driver(&unisys_acpi_driver);

	pr_info("Unisys Visorchipset Driver Loaded.\n");
/* Module exit point: unregister the ACPI driver registered in init. */
static void exit_unisys(void)
	acpi_bus_unregister_driver(&unisys_acpi_driver);

module_init(init_unisys);
module_exit(exit_unisys);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");