3 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
17 #include <linux/acpi.h>
18 #include <linux/ctype.h>
21 #include <linux/nls.h>
22 #include <linux/netdevice.h>
23 #include <linux/uuid.h>
24 #include <linux/crash_dump.h>
27 #include "visorbus_private.h"
28 #include "vmcallinterface.h"
30 #define CURRENT_FILE_PC VISOR_BUS_PC_visorchipset_c
32 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
33 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
35 #define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
37 #define UNISYS_SPAR_LEAF_ID 0x40000000
39 /* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
40 #define UNISYS_SPAR_ID_EBX 0x73696e55
41 #define UNISYS_SPAR_ID_ECX 0x70537379
42 #define UNISYS_SPAR_ID_EDX 0x34367261
45 * When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
46 * we switch to slow polling mode. As soon as we get a controlvm
47 * message, we switch back to fast polling mode.
49 #define MIN_IDLE_SECONDS 10
/*
 * Context for walking a buffered controlvm message payload.
 * NOTE(review): fields used by the parser helpers below (e.g. ->data,
 * ->curr) are not visible in this extract — confirm against full source.
 */
51 struct parser_context {
52 unsigned long allocbytes;
53 unsigned long param_bytes;
55 unsigned long bytes_remaining;
/*
 * Driver-wide state; exactly one instance is referenced through the
 * chipset_dev pointer below.
 */
60 struct visorchipset_device {
61 struct acpi_device *acpi_device;
62 unsigned long poll_jiffies;
63 /* when we got our last controlvm message */
64 unsigned long most_recent_message_jiffies;
65 struct delayed_work periodic_controlvm_work;
66 struct visorchannel *controlvm_channel;
67 unsigned long controlvm_payload_bytes_buffered;
69 * The following variables are used to handle the scenario where we are
70 * unable to offload the payload from a controlvm message due to memory
71 * requirements. In this scenario, we simply stash the controlvm
72 * message, then attempt to process it again the next time
73 * controlvm_periodic_work() runs.
75 struct controlvm_message controlvm_pending_msg;
76 bool controlvm_pending_msg_valid;
/* The one-and-only visorchipset device instance. */
79 static struct visorchipset_device *chipset_dev;
/*
 * A stashed DEVICE_CHANGESTATE message waiting for the parahotplug udev
 * script to report completion via sysfs (see parahotplug_* below).
 */
81 struct parahotplug_request {
82 struct list_head list;
84 unsigned long expiration;
85 struct controlvm_message msg;
88 /* prototypes for attributes */
/*
 * sysfs attribute handlers. Each show/store pair mirrors one installation
 * field of struct spar_controlvm_channel_protocol, accessed with
 * visorchannel_read()/visorchannel_write() at the field's offsetof().
 */
/* toolaction: u8 field, printed as unsigned decimal. */
89 static ssize_t toolaction_show(struct device *dev,
90 struct device_attribute *attr,
96 err = visorchannel_read(chipset_dev->controlvm_channel,
97 offsetof(struct spar_controlvm_channel_protocol,
99 &tool_action, sizeof(u8));
103 return sprintf(buf, "%u\n", tool_action);
/* toolaction store: parses a base-10 u8 and writes it to the channel. */
106 static ssize_t toolaction_store(struct device *dev,
107 struct device_attribute *attr,
108 const char *buf, size_t count)
113 if (kstrtou8(buf, 10, &tool_action))
116 ret = visorchannel_write
117 (chipset_dev->controlvm_channel,
118 offsetof(struct spar_controlvm_channel_protocol,
120 &tool_action, sizeof(u8));
126 static DEVICE_ATTR_RW(toolaction);
/* boottotool: whole efi_spar_indication struct read; boot_to_tool shown. */
128 static ssize_t boottotool_show(struct device *dev,
129 struct device_attribute *attr,
132 struct efi_spar_indication efi_spar_indication;
135 err = visorchannel_read(chipset_dev->controlvm_channel,
136 offsetof(struct spar_controlvm_channel_protocol,
138 &efi_spar_indication,
139 sizeof(struct efi_spar_indication));
143 return sprintf(buf, "%u\n", efi_spar_indication.boot_to_tool);
/*
 * boottotool store: parses an int, then writes the whole struct back.
 * NOTE(review): only boot_to_tool is assigned before the write; whether
 * the remaining struct members are initialized is not visible here.
 */
146 static ssize_t boottotool_store(struct device *dev,
147 struct device_attribute *attr,
148 const char *buf, size_t count)
151 struct efi_spar_indication efi_spar_indication;
153 if (kstrtoint(buf, 10, &val))
156 efi_spar_indication.boot_to_tool = val;
157 ret = visorchannel_write
158 (chipset_dev->controlvm_channel,
159 offsetof(struct spar_controlvm_channel_protocol,
160 efi_spar_ind), &(efi_spar_indication),
161 sizeof(struct efi_spar_indication));
167 static DEVICE_ATTR_RW(boottotool);
/* error: u32 field, printed as signed decimal ("%i"). */
169 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
175 err = visorchannel_read(chipset_dev->controlvm_channel,
176 offsetof(struct spar_controlvm_channel_protocol,
178 &error, sizeof(u32));
181 return sprintf(buf, "%i\n", error);
184 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
185 const char *buf, size_t count)
190 if (kstrtou32(buf, 10, &error))
193 ret = visorchannel_write
194 (chipset_dev->controlvm_channel,
195 offsetof(struct spar_controlvm_channel_protocol,
197 &error, sizeof(u32));
202 static DEVICE_ATTR_RW(error);
/* textid: u32 installation_text_id field. */
204 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
210 err = visorchannel_read
211 (chipset_dev->controlvm_channel,
212 offsetof(struct spar_controlvm_channel_protocol,
213 installation_text_id),
214 &text_id, sizeof(u32));
218 return sprintf(buf, "%i\n", text_id);
221 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
222 const char *buf, size_t count)
227 if (kstrtou32(buf, 10, &text_id))
230 ret = visorchannel_write
231 (chipset_dev->controlvm_channel,
232 offsetof(struct spar_controlvm_channel_protocol,
233 installation_text_id),
234 &text_id, sizeof(u32));
239 static DEVICE_ATTR_RW(textid);
/*
 * remaining_steps: u16 installation_remaining_steps field.
 * NOTE(review): unlike the other show handlers, this one discards the
 * return value of visorchannel_read() and relies on the zero-initialized
 * local if the read fails — consider checking the result for consistency.
 */
241 static ssize_t remaining_steps_show(struct device *dev,
242 struct device_attribute *attr, char *buf)
244 u16 remaining_steps = 0;
246 visorchannel_read(chipset_dev->controlvm_channel,
247 offsetof(struct spar_controlvm_channel_protocol,
248 installation_remaining_steps),
249 &remaining_steps, sizeof(u16));
250 return sprintf(buf, "%hu\n", remaining_steps);
253 static ssize_t remaining_steps_store(struct device *dev,
254 struct device_attribute *attr,
255 const char *buf, size_t count)
260 if (kstrtou16(buf, 10, &remaining_steps))
263 ret = visorchannel_write
264 (chipset_dev->controlvm_channel,
265 offsetof(struct spar_controlvm_channel_protocol,
266 installation_remaining_steps),
267 &remaining_steps, sizeof(u16));
272 static DEVICE_ATTR_RW(remaining_steps);
/* Returns the id from the parameters header at the start of ctx->data. */
275 parser_id_get(struct parser_context *ctx)
277 struct spar_controlvm_parameters_header *phdr = NULL;
279 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
/* Releases this context's accounting of buffered payload bytes. */
283 static void parser_done(struct parser_context *ctx)
285 chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
/*
 * Copies the current parameter region out as a freshly kmalloc'd,
 * NUL-terminated string. Scans up to ctx->bytes_remaining bytes for a
 * terminating '\0'; if none is found the whole region is copied and a
 * '\0' is appended. Caller owns (and must kfree) the returned buffer.
 */
290 parser_string_get(struct parser_context *ctx)
294 int value_length = -1;
299 nscan = ctx->bytes_remaining;
304 for (i = 0, value_length = -1; i < nscan; i++)
305 if (pscan[i] == '\0') {
309 if (value_length < 0) /* '\0' was not included in the length */
310 value_length = nscan;
311 value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
314 if (value_length > 0)
315 memcpy(value, pscan, value_length);
316 ((u8 *)(value))[value_length] = '\0';
/*
 * Positions the parser at the name region described by the parameters
 * header (after bounds-checking offset+length against param_bytes) and
 * returns it as a string via parser_string_get().
 */
321 parser_name_get(struct parser_context *ctx)
323 struct spar_controlvm_parameters_header *phdr = NULL;
325 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
327 if (phdr->name_offset + phdr->name_length > ctx->param_bytes)
330 ctx->curr = ctx->data + phdr->name_offset;
331 ctx->bytes_remaining = phdr->name_length;
332 return parser_string_get(ctx);
/* Key used to match a visor_device by (bus_no, dev_no). */
335 struct visor_busdev {
/* bus_find_device() match callback: compares chipset bus/dev numbers. */
340 static int match_visorbus_dev_by_id(struct device *dev, void *data)
342 struct visor_device *vdev = to_visor_device(dev);
343 struct visor_busdev *id = data;
344 u32 bus_no = id->bus_no;
345 u32 dev_no = id->dev_no;
347 if ((vdev->chipset_bus_no == bus_no) &&
348 (vdev->chipset_dev_no == dev_no))
/*
 * Looks up a visor_device on visorbus_type by (bus_no, dev_no), optionally
 * resuming the search after @from. Returns NULL when no match is found.
 * NOTE(review): reference-count semantics of the returned device are not
 * visible in this extract — confirm whether the caller must drop a ref.
 */
354 struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
355 struct visor_device *from)
358 struct device *dev_start = NULL;
359 struct visor_device *vdev = NULL;
360 struct visor_busdev id = {
366 dev_start = &from->device;
367 dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
368 match_visorbus_dev_by_id);
370 vdev = to_visor_device(dev);
/*
 * Builds a response message from a request header: copies the header,
 * zeroes the payload descriptors, and on failure (non-zero @response)
 * sets the failed flag and stores the negated status.
 */
375 controlvm_init_response(struct controlvm_message *msg,
376 struct controlvm_message_header *msg_hdr, int response)
378 memset(msg, 0, sizeof(struct controlvm_message));
379 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
380 msg->hdr.payload_bytes = 0;
381 msg->hdr.payload_vm_offset = 0;
382 msg->hdr.payload_max_bytes = 0;
384 msg->hdr.flags.failed = 1;
385 msg->hdr.completion_status = (u32)(-response);
/* Responds to CHIPSET_INIT, echoing back the negotiated feature bits. */
390 controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
392 enum ultra_chipset_feature features)
394 struct controlvm_message outmsg;
396 controlvm_init_response(&outmsg, msg_hdr, response);
397 outmsg.cmd.init_chipset.features = features;
398 return visorchannel_signalinsert(chipset_dev->controlvm_channel,
399 CONTROLVM_QUEUE_REQUEST, &outmsg);
/*
 * Handles the CHIPSET_INIT message. Guards against double initialization
 * via the function-static chipset_inited flag, negotiates the parahotplug
 * feature, and sends a features-aware response when one is expected.
 */
403 chipset_init(struct controlvm_message *inmsg)
405 static int chipset_inited;
406 enum ultra_chipset_feature features = 0;
407 int rc = CONTROLVM_RESP_SUCCESS;
410 POSTCODE_LINUX(CHIPSET_INIT_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
411 if (chipset_inited) {
412 rc = -CONTROLVM_RESP_ALREADY_DONE;
417 POSTCODE_LINUX(CHIPSET_INIT_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
420 * Set features to indicate we support parahotplug (if Command
423 features = inmsg->cmd.init_chipset.features &
424 ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
427 * Set the "reply" bit so Command knows this is a
428 * features-aware driver.
430 features |= ULTRA_CHIPSET_FEATURE_REPLY;
433 if (inmsg->hdr.flags.response_expected)
434 res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
/* Generic responder: build response from @msg_hdr and queue it. */
440 controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
442 struct controlvm_message outmsg;
444 controlvm_init_response(&outmsg, msg_hdr, response);
445 if (outmsg.hdr.flags.test_message == 1)
448 return visorchannel_signalinsert(chipset_dev->controlvm_channel,
449 CONTROLVM_QUEUE_REQUEST, &outmsg);
/*
 * Responds to a physical-device DEVICE_CHANGESTATE request, carrying the
 * resulting segment state and marking the device as physical.
 */
452 static int controlvm_respond_physdev_changestate(
453 struct controlvm_message_header *msg_hdr, int response,
454 struct spar_segment_state state)
456 struct controlvm_message outmsg;
458 controlvm_init_response(&outmsg, msg_hdr, response);
459 outmsg.cmd.device_change_state.state = state;
460 outmsg.cmd.device_change_state.flags.phys_device = 1;
461 return visorchannel_signalinsert(chipset_dev->controlvm_channel,
462 CONTROLVM_QUEUE_REQUEST, &outmsg);
/* Distinguishes the two crash-recovery messages (bus vs. device). */
465 enum crash_obj_type {
/*
 * Saves @msg into the controlvm channel's crash-message area so it can be
 * replayed after a kdump boot (see setup_crash_devices_work_queue()).
 * Validates the saved-message count, reads the area's offset, then writes
 * the message to the bus or device slot depending on @typ. Emits POSTCODE
 * diagnostics on each failure path.
 */
471 save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
473 u32 local_crash_msg_offset;
474 u16 local_crash_msg_count;
477 err = visorchannel_read(chipset_dev->controlvm_channel,
478 offsetof(struct spar_controlvm_channel_protocol,
479 saved_crash_message_count),
480 &local_crash_msg_count, sizeof(u16));
482 POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
487 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
488 POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
489 local_crash_msg_count,
494 err = visorchannel_read(chipset_dev->controlvm_channel,
495 offsetof(struct spar_controlvm_channel_protocol,
496 saved_crash_message_offset),
497 &local_crash_msg_offset, sizeof(u32));
499 POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
/* Device messages live one controlvm_message past the bus message. */
506 local_crash_msg_offset += sizeof(struct controlvm_message);
507 err = visorchannel_write(chipset_dev->controlvm_channel,
508 local_crash_msg_offset,
510 sizeof(struct controlvm_message));
512 POSTCODE_LINUX(SAVE_MSG_DEV_FAILURE_PC, 0, 0,
518 err = visorchannel_write(chipset_dev->controlvm_channel,
519 local_crash_msg_offset,
521 sizeof(struct controlvm_message));
523 POSTCODE_LINUX(SAVE_MSG_BUS_FAILURE_PC, 0, 0,
529 pr_info("Invalid crash_obj_type\n");
/*
 * Sends the deferred response for a bus command, but only when a pending
 * header exists and its id matches @cmd_id.
 */
536 bus_responder(enum controlvm_id cmd_id,
537 struct controlvm_message_header *pending_msg_hdr,
540 if (!pending_msg_hdr)
543 if (pending_msg_hdr->id != (u32)cmd_id)
546 return controlvm_respond(pending_msg_hdr, response);
/*
 * Sends the deferred DEVICE_CHANGESTATE response for device @p, echoing
 * the bus/dev numbers and the resulting segment state.
 */
550 device_changestate_responder(enum controlvm_id cmd_id,
551 struct visor_device *p, int response,
552 struct spar_segment_state response_state)
554 struct controlvm_message outmsg;
555 u32 bus_no = p->chipset_bus_no;
556 u32 dev_no = p->chipset_dev_no;
558 if (!p->pending_msg_hdr)
560 if (p->pending_msg_hdr->id != cmd_id)
563 controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
565 outmsg.cmd.device_change_state.bus_no = bus_no;
566 outmsg.cmd.device_change_state.dev_no = dev_no;
567 outmsg.cmd.device_change_state.state = response_state;
569 return visorchannel_signalinsert(chipset_dev->controlvm_channel,
570 CONTROLVM_QUEUE_REQUEST, &outmsg);
/* Device counterpart of bus_responder(): guarded deferred response. */
574 device_responder(enum controlvm_id cmd_id,
575 struct controlvm_message_header *pending_msg_hdr,
578 if (!pending_msg_hdr)
581 if (pending_msg_hdr->id != (u32)cmd_id)
584 return controlvm_respond(pending_msg_hdr, response);
/*
 * Handles BUS_CREATE: allocates a visor_device for the new bus, stashes a
 * copy of the request header when a response is expected (answered later
 * by chipset_bus_create()), creates the bus's visorchannel, and for the
 * s-Par IOVM instance saves the message for crash recovery. On error,
 * frees partial state and responds immediately.
 */
588 bus_create(struct controlvm_message *inmsg)
590 struct controlvm_message_packet *cmd = &inmsg->cmd;
591 struct controlvm_message_header *pmsg_hdr = NULL;
592 u32 bus_no = cmd->create_bus.bus_no;
593 struct visor_device *bus_info;
594 struct visorchannel *visorchannel;
/* Reject duplicate creation of an already-created bus. */
597 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
598 if (bus_info && (bus_info->state.created == 1)) {
599 POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
605 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
607 POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
613 INIT_LIST_HEAD(&bus_info->list_all);
614 bus_info->chipset_bus_no = bus_no;
615 bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
617 POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
/* The s-Par IOVM bus message is preserved for kdump replay. */
619 if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) {
620 err = save_crash_message(inmsg, CRASH_BUS);
622 goto err_free_bus_info;
625 if (inmsg->hdr.flags.response_expected == 1) {
626 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr),
629 POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
630 bus_info->chipset_bus_no,
633 goto err_free_bus_info;
636 memcpy(pmsg_hdr, &inmsg->hdr,
637 sizeof(struct controlvm_message_header));
638 bus_info->pending_msg_hdr = pmsg_hdr;
641 visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
642 cmd->create_bus.channel_bytes,
644 cmd->create_bus.bus_data_type_uuid);
647 POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
650 goto err_free_pending_msg;
652 bus_info->visorchannel = visorchannel;
654 /* Response will be handled by chipset_bus_create */
655 chipset_bus_create(bus_info);
657 POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
660 err_free_pending_msg:
661 kfree(bus_info->pending_msg_hdr);
667 if (inmsg->hdr.flags.response_expected == 1)
668 bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * Handles BUS_DESTROY: validates that the bus exists, was created, and has
 * no response still outstanding; stashes the request header when a
 * response is expected, then delegates to chipset_bus_destroy() (which
 * sends the actual response). Errors are answered immediately.
 */
673 bus_destroy(struct controlvm_message *inmsg)
675 struct controlvm_message_packet *cmd = &inmsg->cmd;
676 struct controlvm_message_header *pmsg_hdr = NULL;
677 u32 bus_no = cmd->destroy_bus.bus_no;
678 struct visor_device *bus_info;
681 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
686 if (bus_info->state.created == 0) {
690 if (bus_info->pending_msg_hdr) {
691 /* only non-NULL if dev is still waiting on a response */
695 if (inmsg->hdr.flags.response_expected == 1) {
696 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
698 POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
699 bus_info->chipset_bus_no,
705 memcpy(pmsg_hdr, &inmsg->hdr,
706 sizeof(struct controlvm_message_header));
707 bus_info->pending_msg_hdr = pmsg_hdr;
710 /* Response will be handled by chipset_bus_destroy */
711 chipset_bus_destroy(bus_info);
715 if (inmsg->hdr.flags.response_expected == 1)
716 bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * Handles BUS_CONFIGURE: for an existing, created bus with no pending
 * response, sets the channel's client partition handle and records the
 * partition uuid/name parsed from the message parameters. Responds
 * directly (success or failure) when a response is expected.
 */
721 bus_configure(struct controlvm_message *inmsg,
722 struct parser_context *parser_ctx)
724 struct controlvm_message_packet *cmd = &inmsg->cmd;
726 struct visor_device *bus_info;
729 bus_no = cmd->configure_bus.bus_no;
730 POSTCODE_LINUX(BUS_CONFIGURE_ENTRY_PC, 0, bus_no,
731 DIAG_SEVERITY_PRINT);
733 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
735 POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
739 } else if (bus_info->state.created == 0) {
740 POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
744 } else if (bus_info->pending_msg_hdr) {
745 POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
751 err = visorchannel_set_clientpartition
752 (bus_info->visorchannel,
753 cmd->configure_bus.guest_handle);
/* NOTE(review): handling of a NULL parser_ctx is not visible here. */
758 bus_info->partition_uuid = parser_id_get(parser_ctx);
759 bus_info->name = parser_name_get(parser_ctx);
762 POSTCODE_LINUX(BUS_CONFIGURE_EXIT_PC, 0, bus_no,
763 DIAG_SEVERITY_PRINT);
765 if (inmsg->hdr.flags.response_expected == 1)
766 bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
770 if (inmsg->hdr.flags.response_expected == 1)
771 bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * Handles DEVICE_CREATE: verifies the parent bus exists and was created,
 * rejects duplicate devices, allocates a visor_device, creates its
 * (locked) visorchannel, saves vHBA create messages for crash recovery,
 * and stashes the request header for chipset_device_create() to answer.
 * Errors are cleaned up and answered immediately.
 */
776 my_device_create(struct controlvm_message *inmsg)
778 struct controlvm_message_packet *cmd = &inmsg->cmd;
779 struct controlvm_message_header *pmsg_hdr = NULL;
780 u32 bus_no = cmd->create_device.bus_no;
781 u32 dev_no = cmd->create_device.dev_no;
782 struct visor_device *dev_info = NULL;
783 struct visor_device *bus_info;
784 struct visorchannel *visorchannel;
787 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
789 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
795 if (bus_info->state.created == 0) {
796 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
/* Reject duplicate creation of an already-created device. */
802 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
803 if (dev_info && (dev_info->state.created == 1)) {
804 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
810 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
812 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
818 dev_info->chipset_bus_no = bus_no;
819 dev_info->chipset_dev_no = dev_no;
820 dev_info->inst = cmd->create_device.dev_inst_uuid;
822 /* not sure where the best place to set the 'parent' */
823 dev_info->device.parent = &bus_info->device;
825 POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
826 DIAG_SEVERITY_PRINT);
829 visorchannel_create_with_lock(cmd->create_device.channel_addr,
830 cmd->create_device.channel_bytes,
832 cmd->create_device.data_type_uuid);
835 POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
838 goto err_free_dev_info;
840 dev_info->visorchannel = visorchannel;
841 dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
/* vHBA device messages are preserved for kdump replay. */
842 if (uuid_le_cmp(cmd->create_device.data_type_uuid,
843 spar_vhba_channel_protocol_uuid) == 0) {
844 err = save_crash_message(inmsg, CRASH_DEV);
846 goto err_free_dev_info;
849 if (inmsg->hdr.flags.response_expected == 1) {
850 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
853 goto err_free_dev_info;
856 memcpy(pmsg_hdr, &inmsg->hdr,
857 sizeof(struct controlvm_message_header));
858 dev_info->pending_msg_hdr = pmsg_hdr;
860 /* Chipset_device_create will send response */
861 chipset_device_create(dev_info);
862 POSTCODE_LINUX(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
863 DIAG_SEVERITY_PRINT);
870 if (inmsg->hdr.flags.response_expected == 1)
871 device_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * Handles DEVICE_CHANGESTATE for a virtual device: validates the device,
 * stashes the request header, then routes the requested segment state to
 * chipset_device_resume() (running) or chipset_device_pause() (standby /
 * server lost); those callbacks send the deferred response. Validation
 * errors are answered immediately.
 */
876 my_device_changestate(struct controlvm_message *inmsg)
878 struct controlvm_message_packet *cmd = &inmsg->cmd;
879 struct controlvm_message_header *pmsg_hdr = NULL;
880 u32 bus_no = cmd->device_change_state.bus_no;
881 u32 dev_no = cmd->device_change_state.dev_no;
882 struct spar_segment_state state = cmd->device_change_state.state;
883 struct visor_device *dev_info;
886 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
888 POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
893 if (dev_info->state.created == 0) {
894 POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
899 if (dev_info->pending_msg_hdr) {
900 /* only non-NULL if dev is still waiting on a response */
904 if (inmsg->hdr.flags.response_expected == 1) {
905 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
911 memcpy(pmsg_hdr, &inmsg->hdr,
912 sizeof(struct controlvm_message_header));
913 dev_info->pending_msg_hdr = pmsg_hdr;
916 if (state.alive == segment_state_running.alive &&
917 state.operating == segment_state_running.operating)
918 /* Response will be sent from chipset_device_resume */
919 chipset_device_resume(dev_info);
920 /* ServerNotReady / ServerLost / SegmentStateStandby */
921 else if (state.alive == segment_state_standby.alive &&
922 state.operating == segment_state_standby.operating)
924 * technically this is standby case where server is lost.
925 * Response will be sent from chipset_device_pause.
927 chipset_device_pause(dev_info);
931 if (inmsg->hdr.flags.response_expected == 1)
932 device_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * Handles DEVICE_DESTROY: validates the device exists, was created, and
 * has no response outstanding; stashes the request header when a response
 * is expected, then delegates to chipset_device_destroy() (which answers).
 * Validation errors are answered immediately.
 */
937 my_device_destroy(struct controlvm_message *inmsg)
939 struct controlvm_message_packet *cmd = &inmsg->cmd;
940 struct controlvm_message_header *pmsg_hdr = NULL;
941 u32 bus_no = cmd->destroy_device.bus_no;
942 u32 dev_no = cmd->destroy_device.dev_no;
943 struct visor_device *dev_info;
946 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
951 if (dev_info->state.created == 0) {
956 if (dev_info->pending_msg_hdr) {
957 /* only non-NULL if dev is still waiting on a response */
961 if (inmsg->hdr.flags.response_expected == 1) {
962 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
968 memcpy(pmsg_hdr, &inmsg->hdr,
969 sizeof(struct controlvm_message_header));
970 dev_info->pending_msg_hdr = pmsg_hdr;
973 chipset_device_destroy(dev_info);
977 if (inmsg->hdr.flags.response_expected == 1)
978 device_responder(inmsg->hdr.id, &inmsg->hdr, err);
983 * The general parahotplug flow works as follows. The visorchipset receives
984 * a DEVICE_CHANGESTATE message from Command specifying a physical device
985 * to enable or disable. The CONTROLVM message handler calls
986 * parahotplug_process_message, which then adds the message to a global list
987 * and kicks off a udev event which causes a user level script to enable or
988 * disable the specified device. The udev script then writes to
989 * /sys/devices/platform/visorchipset/parahotplug, which causes the
990 * parahotplug store functions to get called, at which point the
991 * appropriate CONTROLVM message is retrieved from the list and responded
/* How long a queued parahotplug request stays valid before expiring. */
995 #define PARAHOTPLUG_TIMEOUT_MS 2000
998 * parahotplug_next_id() - generate unique int to match an outstanding
999 * CONTROLVM message with a udev script /sys
1002 * Return: a unique integer value
1005 parahotplug_next_id(void)
1007 static atomic_t id = ATOMIC_INIT(0);
1009 return atomic_inc_return(&id);
1013 * parahotplug_next_expiration() - returns the time (in jiffies) when a
1014 * CONTROLVM message on the list should expire
1015 * -- PARAHOTPLUG_TIMEOUT_MS in the future
1017 * Return: expected expiration time (in jiffies)
1019 static unsigned long
1020 parahotplug_next_expiration(void)
1022 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1026 * parahotplug_request_create() - create a parahotplug_request, which is
1027 * basically a wrapper for a CONTROLVM_MESSAGE
1028 * that we can stick on a list
1029 * @msg: the message to insert in the request
1031 * Return: the request containing the provided message
1033 static struct parahotplug_request *
1034 parahotplug_request_create(struct controlvm_message *msg)
1036 struct parahotplug_request *req;
1038 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
1042 req->id = parahotplug_next_id();
1043 req->expiration = parahotplug_next_expiration();
1050 * parahotplug_request_destroy() - free a parahotplug_request
1051 * @req: the request to deallocate
1054 parahotplug_request_destroy(struct parahotplug_request *req)
/* Outstanding disable requests awaiting completion from the udev script. */
1059 static LIST_HEAD(parahotplug_request_list);
1060 static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
1063 * parahotplug_request_complete() - mark request as complete
1064 * @id: the id of the request
1065 * @active: indicates whether the request is assigned to active partition
1067 * Called from the /sys handler, which means the user script has
1068 * finished the enable/disable. Find the matching identifier, and
1069 * respond to the CONTROLVM message with success.
1071 * Return: 0 on success or -EINVAL on failure
1074 parahotplug_request_complete(int id, u16 active)
1076 struct list_head *pos;
1077 struct list_head *tmp;
1079 spin_lock(&parahotplug_request_list_lock);
1081 /* Look for a request matching "id". */
1082 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1083 struct parahotplug_request *req =
1084 list_entry(pos, struct parahotplug_request, list);
1085 if (req->id == id) {
1087 * Found a match. Remove it from the list and
1091 spin_unlock(&parahotplug_request_list_lock);
1092 req->msg.cmd.device_change_state.state.active = active;
1093 if (req->msg.hdr.flags.response_expected)
1094 controlvm_respond_physdev_changestate(
1095 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1096 req->msg.cmd.device_change_state.state);
1097 parahotplug_request_destroy(req);
1102 spin_unlock(&parahotplug_request_list_lock);
1107 * devicedisabled_store() - disables the hotplug device
1108 * @dev: sysfs interface variable not utilized in this function
1109 * @attr: sysfs interface variable not utilized in this function
1110 * @buf: buffer containing the device id
1111 * @count: the size of the buffer
1113 * The parahotplug/devicedisabled interface gets called by our support script
1114 * when an SR-IOV device has been shut down. The ID is passed to the script
1115 * and then passed back when the device has been removed.
1117 * Return: the size of the buffer for success or negative for error
1119 static ssize_t devicedisabled_store(struct device *dev,
1120 struct device_attribute *attr,
1121 const char *buf, size_t count)
1126 if (kstrtouint(buf, 10, &id))
/* active = 0: the script reports the device is now disabled. */
1129 err = parahotplug_request_complete(id, 0);
1134 static DEVICE_ATTR_WO(devicedisabled);
1137 * deviceenabled_store() - enables the hotplug device
1138 * @dev: sysfs interface variable not utilized in this function
1139 * @attr: sysfs interface variable not utilized in this function
1140 * @buf: buffer containing the device id
1141 * @count: the size of the buffer
1143 * The parahotplug/deviceenabled interface gets called by our support script
1144 * when an SR-IOV device has been recovered. The ID is passed to the script
1145 * and then passed back when the device has been brought back up.
1147 * Return: the size of the buffer for success or negative for error
1149 static ssize_t deviceenabled_store(struct device *dev,
1150 struct device_attribute *attr,
1151 const char *buf, size_t count)
1155 if (kstrtouint(buf, 10, &id))
/*
 * active = 1; NOTE(review): unlike devicedisabled_store(), the result of
 * parahotplug_request_complete() is discarded here.
 */
1158 parahotplug_request_complete(id, 1);
1161 static DEVICE_ATTR_WO(deviceenabled);
1163 static struct attribute *visorchipset_install_attrs[] = {
1164 &dev_attr_toolaction.attr,
1165 &dev_attr_boottotool.attr,
1166 &dev_attr_error.attr,
1167 &dev_attr_textid.attr,
1168 &dev_attr_remaining_steps.attr,
1172 static const struct attribute_group visorchipset_install_group = {
1174 .attrs = visorchipset_install_attrs
1177 static struct attribute *visorchipset_parahotplug_attrs[] = {
1178 &dev_attr_devicedisabled.attr,
1179 &dev_attr_deviceenabled.attr,
1183 static struct attribute_group visorchipset_parahotplug_group = {
1184 .name = "parahotplug",
1185 .attrs = visorchipset_parahotplug_attrs
1188 static const struct attribute_group *visorchipset_dev_groups[] = {
1189 &visorchipset_install_group,
1190 &visorchipset_parahotplug_group,
1195 * parahotplug_request_kickoff() - initiate parahotplug request
1196 * @req: the request to initiate
1198 * Cause uevent to run the user level script to do the disable/enable specified
1199 * in the parahotplug_request.
1202 parahotplug_request_kickoff(struct parahotplug_request *req)
1204 struct controlvm_message_packet *cmd = &req->msg.cmd;
/*
 * Fixed 40-byte environment strings filled with sprintf(); the formatted
 * values are bounded integers, but keep sizes in mind if formats change.
 */
1205 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1208 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1211 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1212 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1213 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1214 cmd->device_change_state.state.active);
1215 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1216 cmd->device_change_state.bus_no);
/* dev_no encodes PCI device (high bits) and function (low 3 bits). */
1217 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1218 cmd->device_change_state.dev_no >> 3);
1219 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1220 cmd->device_change_state.dev_no & 0x7);
1222 return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1227 * parahotplug_process_message() - enables or disables a PCI device by kicking
1229 * @inmsg: the message indicating whether to enable or disable
1232 parahotplug_process_message(struct controlvm_message *inmsg)
1234 struct parahotplug_request *req;
1237 req = parahotplug_request_create(inmsg)
1242 if (inmsg->cmd.device_change_state.state.active) {
1244 * For enable messages, just respond with success
1245 * right away. This is a bit of a hack, but there are
1246 * issues with the early enable messages we get (with
1247 * either the udev script not detecting that the device
1248 * is up, or not getting called at all). Fortunately
1249 * the messages that get lost don't matter anyway, as
1251 * devices are automatically enabled at
1254 err = parahotplug_request_kickoff(req);
1257 controlvm_respond_physdev_changestate
1259 CONTROLVM_RESP_SUCCESS,
1260 inmsg->cmd.device_change_state.state);
/* Enable path responds immediately, so the request can be freed now. */
1261 parahotplug_request_destroy(req);
1266 * For disable messages, add the request to the
1267 * request list before kicking off the udev script. It
1268 * won't get responded to until the script has
1269 * indicated it's done.
1271 spin_lock(&parahotplug_request_list_lock);
1272 list_add_tail(&req->list, &parahotplug_request_list);
1273 spin_unlock(&parahotplug_request_list_lock);
1275 err = parahotplug_request_kickoff(req);
1281 controlvm_respond_physdev_changestate
1283 inmsg->cmd.device_change_state.state);
1288 * chipset_ready_uevent() - sends chipset_ready action
1290 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1292 * Return: 0 on success, negative on failure
1295 chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
1299 res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
1302 if (msg_hdr->flags.response_expected)
1303 controlvm_respond(msg_hdr, res);
1309 * chipset_selftest_uevent() - sends chipset_selftest action
1311 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1313 * Return: 0 on success, negative on failure
1316 chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
1318 char env_selftest[20];
1319 char *envp[] = { env_selftest, NULL };
1322 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1323 res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1326 if (msg_hdr->flags.response_expected)
1327 controlvm_respond(msg_hdr, res);
1333 * chipset_notready_uevent() - sends chipset_notready action
1335 * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1337 * Return: 0 on success, negative on failure
1340 chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
1344 res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
1346 if (msg_hdr->flags.response_expected)
1347 controlvm_respond(msg_hdr, res);
/*
 * Issues the VMCALL_CONTROLVM_ADDR hypercall with a stack-allocated params
 * struct (passed by physical address) and, on success, returns the
 * controlvm channel's guest-physical address and size via out-params.
 */
1353 issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
1355 struct vmcall_io_controlvm_addr_params params;
1356 int result = VMCALL_SUCCESS;
1359 physaddr = virt_to_phys(&params);
1360 ISSUE_IO_VMCALL(VMCALL_CONTROLVM_ADDR, physaddr, result);
1361 if (VMCALL_SUCCESSFUL(result)) {
1362 *control_addr = params.address;
1363 *control_bytes = params.channel_bytes;
/*
 * Wrapper returning the controlvm channel address (0 on failure, per the
 * not-visible early-return path).
 */
1368 static u64 controlvm_get_channel_address(void)
1373 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
/*
 * kdump-boot work item: re-sends CHIPSET_INIT, then replays the bus and
 * device create messages previously preserved in the controlvm channel by
 * save_crash_message(), so the storage path needed to write the crash dump
 * is reconstructed. Each failed channel read emits a POSTCODE and aborts.
 */
1380 setup_crash_devices_work_queue(struct work_struct *work)
1382 struct controlvm_message local_crash_bus_msg;
1383 struct controlvm_message local_crash_dev_msg;
1384 struct controlvm_message msg;
1385 u32 local_crash_msg_offset;
1386 u16 local_crash_msg_count;
1388 POSTCODE_LINUX(CRASH_DEV_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
1390 /* send init chipset msg */
1391 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
1392 msg.cmd.init_chipset.bus_count = 23;
1393 msg.cmd.init_chipset.switch_count = 0;
1397 /* get saved message count */
1398 if (visorchannel_read(chipset_dev->controlvm_channel,
1399 offsetof(struct spar_controlvm_channel_protocol,
1400 saved_crash_message_count),
1401 &local_crash_msg_count, sizeof(u16)) < 0) {
1402 POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
1407 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
1408 POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
1409 local_crash_msg_count,
1414 /* get saved crash message offset */
1415 if (visorchannel_read(chipset_dev->controlvm_channel,
1416 offsetof(struct spar_controlvm_channel_protocol,
1417 saved_crash_message_offset),
1418 &local_crash_msg_offset, sizeof(u32)) < 0) {
1419 POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
1424 /* read create device message for storage bus offset */
1425 if (visorchannel_read(chipset_dev->controlvm_channel,
1426 local_crash_msg_offset,
1427 &local_crash_bus_msg,
1428 sizeof(struct controlvm_message)) < 0) {
1429 POSTCODE_LINUX(CRASH_DEV_RD_BUS_FAILURE_PC, 0, 0,
1434 /* read create device message for storage device */
1435 if (visorchannel_read(chipset_dev->controlvm_channel,
1436 local_crash_msg_offset +
1437 sizeof(struct controlvm_message),
1438 &local_crash_dev_msg,
1439 sizeof(struct controlvm_message)) < 0) {
1440 POSTCODE_LINUX(CRASH_DEV_RD_DEV_FAILURE_PC, 0, 0,
1445 /* reuse IOVM create bus message */
1446 if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
1447 bus_create(&local_crash_bus_msg);
1449 POSTCODE_LINUX(CRASH_DEV_BUS_NULL_FAILURE_PC, 0, 0,
1454 /* reuse create device message for storage device */
1455 if (local_crash_dev_msg.cmd.create_device.channel_addr) {
1456 my_device_create(&local_crash_dev_msg);
1458 POSTCODE_LINUX(CRASH_DEV_DEV_NULL_FAILURE_PC, 0, 0,
1462 POSTCODE_LINUX(CRASH_DEV_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
/*
 * bus_create_response() - completion callback for a CONTROLVM_BUS_CREATE:
 * marks the bus created (on success — the guarding 'if' is in a missing
 * line), sends the deferred response via bus_responder(), and releases the
 * saved pending message header.
 */
1466 bus_create_response(struct visor_device *bus_info, int response)
1469 bus_info->state.created = 1;
1471 bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
/* header was kmalloc'd when the message was deferred; drop it now */
1474 kfree(bus_info->pending_msg_hdr);
1475 bus_info->pending_msg_hdr = NULL;
/*
 * bus_destroy_response() - completion callback for a CONTROLVM_BUS_DESTROY:
 * sends the deferred response and frees the saved pending message header.
 */
1479 bus_destroy_response(struct visor_device *bus_info, int response)
1481 bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
1484 kfree(bus_info->pending_msg_hdr);
1485 bus_info->pending_msg_hdr = NULL;
/*
 * device_create_response() - completion callback for CONTROLVM_DEVICE_CREATE:
 * marks the device created (success guard is in a missing line), sends the
 * deferred response, and frees the saved pending message header.
 */
1489 device_create_response(struct visor_device *dev_info, int response)
1492 dev_info->state.created = 1;
1494 device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
1497 kfree(dev_info->pending_msg_hdr);
1498 dev_info->pending_msg_hdr = NULL;
/*
 * device_destroy_response() - completion callback for
 * CONTROLVM_DEVICE_DESTROY: sends the deferred response and frees the saved
 * pending message header.
 */
1502 device_destroy_response(struct visor_device *dev_info, int response)
1504 device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
1507 kfree(dev_info->pending_msg_hdr);
1508 dev_info->pending_msg_hdr = NULL;
/*
 * device_pause_response() - completion callback for a device pause: answers
 * the pending CONTROLVM_DEVICE_CHANGESTATE with segment_state_standby and
 * frees the saved pending message header.
 */
1512 device_pause_response(struct visor_device *dev_info,
1515 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
1517 segment_state_standby);
1519 kfree(dev_info->pending_msg_hdr);
1520 dev_info->pending_msg_hdr = NULL;
/*
 * device_resume_response() - completion callback for a device resume:
 * answers the pending CONTROLVM_DEVICE_CHANGESTATE with
 * segment_state_running and frees the saved pending message header.
 */
1524 device_resume_response(struct visor_device *dev_info, int response)
1526 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
1528 segment_state_running);
1530 kfree(dev_info->pending_msg_hdr);
1531 dev_info->pending_msg_hdr = NULL;
/*
 * parser_init_byte_stream() - allocate a parser_context and copy the
 * controlvm payload bytes at guest address @addr into it.
 * @addr:  payload address; interpreted as a directly-mapped kernel physical
 *         address when @local is set, otherwise memremap()ed
 * @bytes: payload length to copy
 * @local: true for test messages whose payload lies in OS-controlled memory
 * @retry: set when allocation should be retried later (throttling) —
 *         the assignments are in missing lines; see handle_command()
 *
 * Refuses the copy when buffering @bytes more would exceed
 * MAX_CONTROLVM_PAYLOAD_BYTES.  On success accounts the bytes in
 * controlvm_payload_bytes_buffered and marks the context as holding a
 * byte stream.
 * NOTE(review): decimated listing — the NULL-check after kzalloc, the
 * error-path cleanup at err_finish_ctx, and the returns are missing here.
 */
1534 static struct parser_context *
1535 parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
1537 int allocbytes = sizeof(struct parser_context) + bytes;
1538 struct parser_context *ctx;
1543 * alloc an extra byte to ensure payload is \0 terminated
/* throttle: never buffer more than MAX_CONTROLVM_PAYLOAD_BYTES at once */
1547 if ((chipset_dev->controlvm_payload_bytes_buffered + bytes)
1548 > MAX_CONTROLVM_PAYLOAD_BYTES) {
1552 ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
1558 ctx->allocbytes = allocbytes;
1559 ctx->param_bytes = bytes;
1561 ctx->bytes_remaining = 0;
1562 ctx->byte_stream = false;
/* local (test-message) payload: must be below high_memory, use __va() */
1566 if (addr > virt_to_phys(high_memory - 1))
1567 goto err_finish_ctx;
1568 p = __va((unsigned long)(addr));
1569 memcpy(ctx->data, p, bytes);
/* non-local payload: map it write-back and copy it out */
1571 void *mapping = memremap(addr, bytes, MEMREMAP_WB);
1574 goto err_finish_ctx;
1575 memcpy(ctx->data, mapping, bytes);
1579 ctx->byte_stream = true;
1580 chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
/*
 * Dispatcher for one controlvm message: builds a payload parser context if
 * the message carries one, ACKs the message on the CONTROLVM_QUEUE_ACK
 * queue, then switches on the message id.  Unknown ids get a
 * -CONTROLVM_RESP_ID_UNKNOWN response when one was expected.
 * NOTE(review): decimated listing — 'break;' statements, several local
 * declarations (err, retry, parm_addr/parm_bytes, local_addr) and the
 * cleanup label before parser_done() fall in the missing lines.
 */
1590 * handle_command() - process a controlvm message
1591 * @inmsg: the message to process
1592 * @channel_addr: address of the controlvm channel
1595 * 0 - Successfully processed the message
1596 * -EAGAIN - ControlVM message was not processed and should be retried
1597 * reading the next controlvm message; a scenario where this can
1598 * occur is when we need to throttle the allocation of memory in
1599 * which to copy out controlvm payload data.
1600 * < 0 - error: ControlVM message was processed but an error occurred.
1603 handle_command(struct controlvm_message inmsg, u64 channel_addr)
1605 struct controlvm_message_packet *cmd = &inmsg.cmd;
1608 struct parser_context *parser_ctx = NULL;
1610 struct controlvm_message ackmsg;
1613 /* create parsing context if necessary */
1614 local_addr = (inmsg.hdr.flags.test_message == 1);
1615 if (channel_addr == 0)
1618 parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1619 parm_bytes = inmsg.hdr.payload_bytes;
1622 * Parameter and channel addresses within test messages actually lie
1623 * within our OS-controlled memory. We need to know that, because it
1624 * makes a difference in how we compute the virtual address.
1626 if (parm_addr && parm_bytes) {
/* a NULL context with retry set means "throttled" -> caller sees -EAGAIN */
1630 parser_init_byte_stream(parm_addr, parm_bytes,
1631 local_addr, &retry);
1632 if (!parser_ctx && retry)
/* ACK receipt of the message before acting on it */
1637 controlvm_init_response(&ackmsg, &inmsg.hdr,
1638 CONTROLVM_RESP_SUCCESS);
1639 err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
1640 CONTROLVM_QUEUE_ACK,
1645 switch (inmsg.hdr.id) {
1646 case CONTROLVM_CHIPSET_INIT:
1647 err = chipset_init(&inmsg);
1649 case CONTROLVM_BUS_CREATE:
1650 err = bus_create(&inmsg);
1652 case CONTROLVM_BUS_DESTROY:
1653 err = bus_destroy(&inmsg);
1655 case CONTROLVM_BUS_CONFIGURE:
1656 err = bus_configure(&inmsg, parser_ctx);
1658 case CONTROLVM_DEVICE_CREATE:
1659 err = my_device_create(&inmsg);
1661 case CONTROLVM_DEVICE_CHANGESTATE:
/* physical devices go through the parahotplug path */
1662 if (cmd->device_change_state.flags.phys_device) {
1663 err = parahotplug_process_message(&inmsg);
1666 * save the hdr and cmd structures for later use
1667 * when sending back the response to Command
1669 err = my_device_changestate(&inmsg);
1673 case CONTROLVM_DEVICE_DESTROY:
1674 err = my_device_destroy(&inmsg);
1676 case CONTROLVM_DEVICE_CONFIGURE:
1677 /* no op just send a respond that we passed */
1678 if (inmsg.hdr.flags.response_expected)
1679 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1681 case CONTROLVM_CHIPSET_READY:
1682 err = chipset_ready_uevent(&inmsg.hdr);
1684 case CONTROLVM_CHIPSET_SELFTEST:
1685 err = chipset_selftest_uevent(&inmsg.hdr);
1687 case CONTROLVM_CHIPSET_STOP:
1688 err = chipset_notready_uevent(&inmsg.hdr);
/* default: unknown message id — negative response code by convention */
1692 if (inmsg.hdr.flags.response_expected)
1693 controlvm_respond(&inmsg.hdr,
1694 -CONTROLVM_RESP_ID_UNKNOWN);
/* release the payload parser context (and its buffered-bytes accounting) */
1699 parser_done(parser_ctx);
/*
 * Pops the next message off the controlvm channel's EVENT queue and — per
 * the visible check — rejects messages flagged as test messages (the
 * rejection return is in a missing line).
 */
1706 * read_controlvm_event() - retrieves the next message from the
1707 * CONTROLVM_QUEUE_EVENT queue in the controlvm
1709 * @msg: pointer to the retrieved message
1711 * Return: 0 if valid message was retrieved or -error
1714 read_controlvm_event(struct controlvm_message *msg)
1718 err = visorchannel_signalremove(chipset_dev->controlvm_channel,
1719 CONTROLVM_QUEUE_EVENT, msg);
1724 if (msg->hdr.flags.test_message == 1)
1731 * parahotplug_process_list() - remove any request from the list that's been on
1732 * there too long and respond with an error
1735 parahotplug_process_list(void)
1737 struct list_head *pos;
1738 struct list_head *tmp;
1740 spin_lock(¶hotplug_request_list_lock);
1742 list_for_each_safe(pos, tmp, ¶hotplug_request_list) {
1743 struct parahotplug_request *req =
1744 list_entry(pos, struct parahotplug_request, list);
1746 if (!time_after_eq(jiffies, req->expiration))
1750 if (req->msg.hdr.flags.response_expected)
1751 controlvm_respond_physdev_changestate(
1753 CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
1754 req->msg.cmd.device_change_state.state);
1755 parahotplug_request_destroy(req);
1758 spin_unlock(¶hotplug_request_list_lock);
/*
 * controlvm_periodic_work() - the driver's polling heart.  Drains the
 * RESPONSE queue, then processes one controlvm message: either a previously
 * throttled message saved in controlvm_pending_msg, or a fresh one from
 * read_controlvm_event().  handle_command() returning -EAGAIN re-saves the
 * message for the next pass.  Also runs the parahotplug timeout sweep and
 * adapts the poll interval: slow after MIN_IDLE_SECONDS of silence, fast
 * otherwise.  Always re-arms itself via schedule_delayed_work().
 * NOTE(review): decimated listing — the drain loop's braces and the
 * error/exit labels are in the missing lines.
 */
1762 controlvm_periodic_work(struct work_struct *work)
1764 struct controlvm_message inmsg;
1767 /* Drain the RESPONSE queue make it empty */
1769 err = visorchannel_signalremove(chipset_dev->controlvm_channel,
1770 CONTROLVM_QUEUE_RESPONSE,
1777 if (chipset_dev->controlvm_pending_msg_valid) {
1779 * we throttled processing of a prior
1780 * msg, so try to process it again
1781 * rather than reading a new one
1783 inmsg = chipset_dev->controlvm_pending_msg;
1784 chipset_dev->controlvm_pending_msg_valid = false;
1787 err = read_controlvm_event(&inmsg);
/* remember when we last saw traffic, for the slow/fast poll decision */
1791 chipset_dev->most_recent_message_jiffies = jiffies;
1792 err = handle_command(inmsg,
1793 visorchannel_get_physaddr
1794 (chipset_dev->controlvm_channel));
1795 if (err == -EAGAIN) {
1796 chipset_dev->controlvm_pending_msg = inmsg;
1797 chipset_dev->controlvm_pending_msg_valid = true;
1801 err = read_controlvm_event(&inmsg);
1804 /* parahotplug_worker */
1805 parahotplug_process_list();
/* idle for MIN_IDLE_SECONDS -> switch to the slow poll interval */
1808 if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
1809 (HZ * MIN_IDLE_SECONDS))) {
1811 * it's been longer than MIN_IDLE_SECONDS since we
1812 * processed our last controlvm message; slow down the
1815 if (chipset_dev->poll_jiffies !=
1816 POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1817 chipset_dev->poll_jiffies =
1818 POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1820 if (chipset_dev->poll_jiffies !=
1821 POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1822 chipset_dev->poll_jiffies =
1823 POLLJIFFIES_CONTROLVMCHANNEL_FAST;
/* re-arm ourselves at the chosen interval */
1826 schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
1827 chipset_dev->poll_jiffies);
/*
 * visorchipset_init() - ACPI .add callback; brings the driver up.
 * Sequence: locate the controlvm channel via vmcall, allocate the global
 * chipset_dev, create the channel object, create the sysfs groups, validate
 * the channel header, arm the periodic work (crash-replay variant when
 * booting a kdump kernel), then visorbus_init().  Errors unwind in reverse
 * through the error_* labels below.
 * NOTE(review): decimated listing — 'int err;' initialization, several
 * early-return checks, the success return and kfree(chipset_dev) on the
 * error path are in the missing lines.
 */
1831 visorchipset_init(struct acpi_device *acpi_device)
1835 uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
1836 struct visorchannel *controlvm_channel;
1838 addr = controlvm_get_channel_address();
1842 chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
1846 acpi_device->driver_data = chipset_dev;
1848 chipset_dev->acpi_device = acpi_device;
1849 chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1850 controlvm_channel = visorchannel_create_with_lock(addr,
1851 0, GFP_KERNEL, uuid);
1853 if (!controlvm_channel)
1854 goto error_free_chipset_dev;
1856 chipset_dev->controlvm_channel = controlvm_channel;
1858 err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
1859 visorchipset_dev_groups);
1861 goto error_destroy_channel;
/* sanity-check that the client side of the channel protocol is usable */
1863 if (!SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
1864 visorchannel_get_header(controlvm_channel)))
1865 goto error_delete_groups;
1867 /* if booting in a crash kernel */
1868 if (is_kdump_kernel())
1869 INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
1870 setup_crash_devices_work_queue);
1872 INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
1873 controlvm_periodic_work);
1875 chipset_dev->most_recent_message_jiffies = jiffies;
1876 chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1877 schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
1878 chipset_dev->poll_jiffies);
1880 POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, 0, DIAG_SEVERITY_PRINT);
1882 err = visorbus_init();
1884 goto error_cancel_work;
/* error unwind: undo each successful setup step in reverse order */
1889 cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
1891 error_delete_groups:
1892 sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
1893 visorchipset_dev_groups);
1895 error_destroy_channel:
1896 visorchannel_destroy(chipset_dev->controlvm_channel);
1898 error_free_chipset_dev:
1902 POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR);
/*
 * visorchipset_exit() - ACPI .remove callback; tears down in reverse of
 * visorchipset_init(): stop the periodic work, remove the sysfs groups,
 * destroy the controlvm channel.  (The visorbus_exit()/kfree(chipset_dev)
 * calls fall in the missing lines of this listing.)
 */
1907 visorchipset_exit(struct acpi_device *acpi_device)
1909 POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
1912 cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
1913 sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
1914 visorchipset_dev_groups);
1916 visorchannel_destroy(chipset_dev->controlvm_channel);
1919 POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
/*
 * ACPI glue: the device-id table (its entries are in missing lines of this
 * listing) and the acpi_driver binding visorchipset_init/exit as the
 * add/remove callbacks.  MODULE_DEVICE_TABLE exports the ids for module
 * autoloading.
 */
1924 static const struct acpi_device_id unisys_device_ids[] = {
1929 static struct acpi_driver unisys_acpi_driver = {
1930 .name = "unisys_acpi",
1931 .class = "unisys_acpi_class",
1932 .owner = THIS_MODULE,
1933 .ids = unisys_device_ids,
1935 .add = visorchipset_init,
1936 .remove = visorchipset_exit,
1940 MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
/*
 * visorutil_spar_detect() - return nonzero iff we are running under the
 * s-Par hypervisor: requires the HYPERVISOR CPU feature, then checks that
 * CPUID leaf UNISYS_SPAR_LEAF_ID returns the "UnisysSpar64" signature in
 * ebx/ecx/edx.  (The "no hypervisor -> return 0" branch is in a missing
 * line of this listing.)
 */
1942 static __init uint32_t visorutil_spar_detect(void)
1944 unsigned int eax, ebx, ecx, edx;
1946 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1948 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
1949 return (ebx == UNISYS_SPAR_ID_EBX) &&
1950 (ecx == UNISYS_SPAR_ID_ECX) &&
1951 (edx == UNISYS_SPAR_ID_EDX);
/*
 * init_unisys() - module entry point: bail out unless running on s-Par,
 * otherwise register the ACPI driver.  (The error returns are in missing
 * lines of this listing.)
 */
1957 static int init_unisys(void)
1961 if (!visorutil_spar_detect())
1964 result = acpi_bus_register_driver(&unisys_acpi_driver);
1968 pr_info("Unisys Visorchipset Driver Loaded.\n");
/* exit_unisys() - module exit point: unregister the ACPI driver. */
1972 static void exit_unisys(void)
1974 acpi_bus_unregister_driver(&unisys_acpi_driver);
/* module entry/exit hookup and module metadata */
1977 module_init(init_unisys);
1978 module_exit(exit_unisys);
1980 MODULE_AUTHOR("Unisys");
1981 MODULE_LICENSE("GPL");
1982 MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");