Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski...
[linux-2.6-block.git] / drivers / staging / unisys / visorbus / visorchipset.c
CommitLineData
12e364b9
KC
1/* visorchipset_main.c
2 *
6f14cc18 3 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
12e364b9
KC
4 * All rights reserved.
5 *
6f14cc18
BR
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
12e364b9
KC
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 */
16
55c67dca 17#include <linux/acpi.h>
c0a14641 18#include <linux/cdev.h>
46168810 19#include <linux/ctype.h>
e3420ed6
EA
20#include <linux/fs.h>
21#include <linux/mm.h>
12e364b9
KC
22#include <linux/nls.h>
23#include <linux/netdevice.h>
24#include <linux/platform_device.h>
90addb02 25#include <linux/uuid.h>
1ba00980 26#include <linux/crash_dump.h>
12e364b9 27
5f3a7e36 28#include "channel_guid.h"
55c67dca
PB
29#include "controlvmchannel.h"
30#include "controlvmcompletionstatus.h"
31#include "guestlinuxdebug.h"
32#include "periodic_work.h"
55c67dca
PB
33#include "version.h"
34#include "visorbus.h"
35#include "visorbus_private.h"
5f3a7e36 36#include "vmcallinterface.h"
55c67dca 37
12e364b9 38#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
12e364b9
KC
39
40#define MAX_NAME_SIZE 128
41#define MAX_IP_SIZE 50
42#define MAXOUTSTANDINGCHANNELCOMMAND 256
43#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
44#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
45
2c7e1d4e 46#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
2ee0deec
PB
47
48#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000
49
d5b3f1dc
EA
50#define UNISYS_SPAR_LEAF_ID 0x40000000
51
52/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
53#define UNISYS_SPAR_ID_EBX 0x73696e55
54#define UNISYS_SPAR_ID_ECX 0x70537379
55#define UNISYS_SPAR_ID_EDX 0x34367261
56
b615d628
JS
57/*
58 * Module parameters
59 */
b615d628 60static int visorchipset_major;
4da3336c 61static int visorchipset_visorbusregwait = 1; /* default is on */
b615d628 62static int visorchipset_holdchipsetready;
46168810 63static unsigned long controlvm_payload_bytes_buffered;
12c957dc 64static u32 dump_vhba_bus;
b615d628 65
e3420ed6
EA
66static int
67visorchipset_open(struct inode *inode, struct file *file)
68{
69 unsigned minor_number = iminor(inode);
70
71 if (minor_number)
72 return -ENODEV;
73 file->private_data = NULL;
74 return 0;
75}
76
/*
 * visorchipset_release() - release handler for /dev/visorchipset.
 * Nothing was allocated in open, so there is nothing to tear down.
 */
static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}
82
12e364b9
KC
83/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
84* we switch to slow polling mode. As soon as we get a controlvm
85* message, we switch back to fast polling mode.
86*/
87#define MIN_IDLE_SECONDS 10
52063eca 88static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2ee0d052
EA
89/* when we got our last controlvm message */
90static unsigned long most_recent_message_jiffies;
4da3336c 91static int visorbusregistered;
12e364b9
KC
92
93#define MAX_CHIPSET_EVENTS 2
c242233e 94static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
12e364b9 95
46168810
EA
/* Holds a buffered copy of a controlvm message payload while it is parsed.
 * @allocbytes:      total bytes allocated for this context (struct + data).
 * @param_bytes:     number of payload bytes copied into @data; this amount
 *                   is charged against controlvm_payload_bytes_buffered.
 * @curr:            cursor into @data for the string currently being parsed
 *                   (set by parser_param_start()).
 * @bytes_remaining: bytes left from @curr to the end of the current string.
 * @byte_stream:     true once the payload has been fully copied into @data.
 * @data:            the payload bytes themselves (trailing variable array).
 */
struct parser_context {
	unsigned long allocbytes;
	unsigned long param_bytes;
	u8 *curr;
	unsigned long bytes_remaining;
	bool byte_stream;
	char data[0];
};
104
9232d2d6 105static struct delayed_work periodic_controlvm_work;
8f1947ac 106static DEFINE_SEMAPHORE(notifier_lock);
12e364b9 107
e3420ed6
EA
108static struct cdev file_cdev;
109static struct visorchannel **file_controlvm_channel;
da021f02 110static struct controlvm_message_header g_chipset_msg_hdr;
4f44b72d 111static struct controlvm_message_packet g_devicechangestate_packet;
12e364b9 112
1390b88c
BR
113static LIST_HEAD(bus_info_list);
114static LIST_HEAD(dev_info_list);
12e364b9 115
c3d9a224 116static struct visorchannel *controlvm_channel;
12e364b9 117
84982fbf 118/* Manages the request payload in the controlvm channel */
/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
	u8 *ptr;		/* pointer to base address of payload pool */
	u64 offset;		/* offset from beginning of controlvm
				 * channel to beginning of payload pool
				 */
	u32 bytes;		/* number of bytes in payload pool */
};
126
127static struct visor_controlvm_payload_info controlvm_payload_info;
12e364b9 128
12e364b9
KC
129/* The following globals are used to handle the scenario where we are unable to
130 * offload the payload from a controlvm message due to memory requirements. In
131 * this scenario, we simply stash the controlvm message, then attempt to
132 * process it again the next time controlvm_periodic_work() runs.
133 */
7166ed19 134static struct controlvm_message controlvm_pending_msg;
c79b28f7 135static bool controlvm_pending_msg_valid;
12e364b9 136
12e364b9
KC
/* This identifies a data buffer that has been received via controlvm messages
 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
	struct list_head next;	/* putfile_buffer_entry list */
	struct parser_context *parser_ctx; /* points to input data buffer */
};
144
145/* List of struct putfile_request *, via next_putfile_request member.
146 * Each entry in this list identifies an outstanding TRANSMIT_FILE
147 * conversation.
148 */
1eee0011 149static LIST_HEAD(putfile_request_list);
12e364b9
KC
150
/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
	/* a payload from a controlvm message, containing a file data buffer */
	struct parser_context *parser_ctx;
	/* points within data area of parser_ctx to next byte of data */
	u8 *pnext;
	/* # bytes left from <pnext> to the end of this data buffer */
	size_t bytes_remaining;
};
163
/* Magic value used to sanity-check that a putfile_request is genuine. */
#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * <putfile_request_list>.
 */
struct putfile_request {
	u64 sig;		/* must be PUTFILE_REQUEST_SIG */

	/* header from original TransmitFile request */
	struct controlvm_message_header controlvm_header;
	u64 file_request_number;	/* from original TransmitFile request */

	/* link to next struct putfile_request */
	struct list_head next_putfile_request;

	/* most-recent sequence number supplied via a controlvm message */
	u64 data_sequence_number;

	/* head of putfile_buffer_entry list, which describes the data to be
	 * supplied as putfile data;
	 * - this list is added to when controlvm messages come in that supply
	 *   file data
	 * - this list is removed from via the hotplug program that is actually
	 *   consuming these buffers to write as file data
	 */
	struct list_head input_buffer_list;
	spinlock_t req_list_lock;	/* lock for input_buffer_list */

	/* waiters for input_buffer_list to go non-empty */
	wait_queue_head_t input_buffer_wq;

	/* data not yet read within current putfile_buffer_entry */
	struct putfile_active_buffer active_buf;

	/* <0 = failed, 0 = in-progress, >0 = successful; */
	/* note that this must be set with req_list_lock, and if you set <0, */
	/* it is your responsibility to also free up all of the other objects */
	/* in this struct (like input_buffer_list, active_buf.parser_ctx) */
	/* before releasing the lock */
	int completion_status;
};
205
12e364b9
KC
/* A pending parahotplug (device enable/disable) request, queued on
 * parahotplug_request_list until user space acknowledges it.
 * @id:         unique identifier used to match the acknowledgment.
 * @expiration: jiffies value after which the request is considered stale.
 * @msg:        the original controlvm message, kept so we can respond to it.
 */
struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};
212
ddf5de53
BR
213static LIST_HEAD(parahotplug_request_list);
214static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
12e364b9
KC
215static void parahotplug_process_list(void);
216
217/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
218 * CONTROLVM_REPORTEVENT.
219 */
4da3336c 220static struct visorchipset_busdev_notifiers busdev_notifiers;
12e364b9 221
d32517e3
DZ
222static void bus_create_response(struct visor_device *p, int response);
223static void bus_destroy_response(struct visor_device *p, int response);
a298bc0b
DZ
224static void device_create_response(struct visor_device *p, int response);
225static void device_destroy_response(struct visor_device *p, int response);
226static void device_resume_response(struct visor_device *p, int response);
12e364b9 227
a298bc0b
DZ
228static void visorchipset_device_pause_response(struct visor_device *p,
229 int response);
2ee0deec 230
8e3fedd6 231static struct visorchipset_busdev_responders busdev_responders = {
12e364b9
KC
232 .bus_create = bus_create_response,
233 .bus_destroy = bus_destroy_response,
234 .device_create = device_create_response,
235 .device_destroy = device_destroy_response,
927c7927 236 .device_pause = visorchipset_device_pause_response,
12e364b9
KC
237 .device_resume = device_resume_response,
238};
239
240/* info for /dev/visorchipset */
5aa8ae57 241static dev_t major_dev = -1; /**< indicates major num for device */
12e364b9 242
19f6634f
BR
243/* prototypes for attributes */
244static ssize_t toolaction_show(struct device *dev,
8e76e695 245 struct device_attribute *attr, char *buf);
19f6634f 246static ssize_t toolaction_store(struct device *dev,
8e76e695
BR
247 struct device_attribute *attr,
248 const char *buf, size_t count);
19f6634f
BR
249static DEVICE_ATTR_RW(toolaction);
250
54b31229 251static ssize_t boottotool_show(struct device *dev,
8e76e695 252 struct device_attribute *attr, char *buf);
54b31229 253static ssize_t boottotool_store(struct device *dev,
8e76e695
BR
254 struct device_attribute *attr, const char *buf,
255 size_t count);
54b31229
BR
256static DEVICE_ATTR_RW(boottotool);
257
422af17c 258static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8e76e695 259 char *buf);
422af17c 260static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8e76e695 261 const char *buf, size_t count);
422af17c
BR
262static DEVICE_ATTR_RW(error);
263
264static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
8e76e695 265 char *buf);
422af17c 266static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
8e76e695 267 const char *buf, size_t count);
422af17c
BR
268static DEVICE_ATTR_RW(textid);
269
270static ssize_t remaining_steps_show(struct device *dev,
8e76e695 271 struct device_attribute *attr, char *buf);
422af17c 272static ssize_t remaining_steps_store(struct device *dev,
8e76e695
BR
273 struct device_attribute *attr,
274 const char *buf, size_t count);
422af17c
BR
275static DEVICE_ATTR_RW(remaining_steps);
276
18b87ed1 277static ssize_t chipsetready_store(struct device *dev,
8e76e695
BR
278 struct device_attribute *attr,
279 const char *buf, size_t count);
18b87ed1
BR
280static DEVICE_ATTR_WO(chipsetready);
281
e56fa7cd 282static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
283 struct device_attribute *attr,
284 const char *buf, size_t count);
e56fa7cd
BR
285static DEVICE_ATTR_WO(devicedisabled);
286
287static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
288 struct device_attribute *attr,
289 const char *buf, size_t count);
e56fa7cd
BR
290static DEVICE_ATTR_WO(deviceenabled);
291
19f6634f
BR
292static struct attribute *visorchipset_install_attrs[] = {
293 &dev_attr_toolaction.attr,
54b31229 294 &dev_attr_boottotool.attr,
422af17c
BR
295 &dev_attr_error.attr,
296 &dev_attr_textid.attr,
297 &dev_attr_remaining_steps.attr,
19f6634f
BR
298 NULL
299};
300
301static struct attribute_group visorchipset_install_group = {
302 .name = "install",
303 .attrs = visorchipset_install_attrs
304};
305
18b87ed1
BR
306static struct attribute *visorchipset_guest_attrs[] = {
307 &dev_attr_chipsetready.attr,
308 NULL
309};
310
311static struct attribute_group visorchipset_guest_group = {
312 .name = "guest",
313 .attrs = visorchipset_guest_attrs
314};
315
e56fa7cd
BR
316static struct attribute *visorchipset_parahotplug_attrs[] = {
317 &dev_attr_devicedisabled.attr,
318 &dev_attr_deviceenabled.attr,
319 NULL
320};
321
322static struct attribute_group visorchipset_parahotplug_group = {
323 .name = "parahotplug",
324 .attrs = visorchipset_parahotplug_attrs
325};
326
19f6634f
BR
327static const struct attribute_group *visorchipset_dev_groups[] = {
328 &visorchipset_install_group,
18b87ed1 329 &visorchipset_guest_group,
e56fa7cd 330 &visorchipset_parahotplug_group,
19f6634f
BR
331 NULL
332};
333
04dacacc
DZ
/* Empty release: the platform device below is statically allocated and never
 * freed, but the driver core insists on a non-NULL release callback.
 */
static void visorchipset_dev_release(struct device *dev)
{
}
337
12e364b9 338/* /sys/devices/platform/visorchipset */
eb34e877 339static struct platform_device visorchipset_platform_device = {
12e364b9
KC
340 .name = "visorchipset",
341 .id = -1,
19f6634f 342 .dev.groups = visorchipset_dev_groups,
04dacacc 343 .dev.release = visorchipset_dev_release,
12e364b9
KC
344};
345
346/* Function prototypes */
b3168c70 347static void controlvm_respond(struct controlvm_message_header *msg_hdr,
98d7b594
BR
348 int response);
349static void controlvm_respond_chipset_init(
b3168c70 350 struct controlvm_message_header *msg_hdr, int response,
98d7b594
BR
351 enum ultra_chipset_feature features);
352static void controlvm_respond_physdev_changestate(
b3168c70 353 struct controlvm_message_header *msg_hdr, int response,
98d7b594 354 struct spar_segment_state state);
12e364b9 355
2ee0deec
PB
356static void parser_done(struct parser_context *ctx);
357
46168810 358static struct parser_context *
fbf35536 359parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
46168810
EA
360{
361 int allocbytes = sizeof(struct parser_context) + bytes;
362 struct parser_context *rc = NULL;
363 struct parser_context *ctx = NULL;
46168810
EA
364
365 if (retry)
366 *retry = false;
cc55b5c5
JS
367
368 /*
369 * alloc an 0 extra byte to ensure payload is
370 * '\0'-terminated
371 */
372 allocbytes++;
46168810
EA
373 if ((controlvm_payload_bytes_buffered + bytes)
374 > MAX_CONTROLVM_PAYLOAD_BYTES) {
375 if (retry)
376 *retry = true;
377 rc = NULL;
378 goto cleanup;
379 }
8c395e74 380 ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
46168810
EA
381 if (!ctx) {
382 if (retry)
383 *retry = true;
384 rc = NULL;
385 goto cleanup;
386 }
387
388 ctx->allocbytes = allocbytes;
389 ctx->param_bytes = bytes;
390 ctx->curr = NULL;
391 ctx->bytes_remaining = 0;
392 ctx->byte_stream = false;
393 if (local) {
394 void *p;
395
396 if (addr > virt_to_phys(high_memory - 1)) {
397 rc = NULL;
398 goto cleanup;
399 }
0e7bf2f4 400 p = __va((unsigned long)(addr));
46168810
EA
401 memcpy(ctx->data, p, bytes);
402 } else {
a8deaef3 403 void *mapping = memremap(addr, bytes, MEMREMAP_WB);
dd412751 404
dd412751 405 if (!mapping) {
46168810
EA
406 rc = NULL;
407 goto cleanup;
408 }
3103dc03 409 memcpy(ctx->data, mapping, bytes);
3103dc03 410 memunmap(mapping);
46168810 411 }
46168810 412
cc55b5c5 413 ctx->byte_stream = true;
46168810
EA
414 rc = ctx;
415cleanup:
46168810
EA
416 if (rc) {
417 controlvm_payload_bytes_buffered += ctx->param_bytes;
418 } else {
419 if (ctx) {
420 parser_done(ctx);
421 ctx = NULL;
422 }
423 }
424 return rc;
425}
426
464129ed 427static uuid_le
46168810
EA
428parser_id_get(struct parser_context *ctx)
429{
430 struct spar_controlvm_parameters_header *phdr = NULL;
431
e4a3dd33 432 if (!ctx)
46168810
EA
433 return NULL_UUID_LE;
434 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
435 return phdr->id;
436}
437
2ee0deec
PB
438/** Describes the state from the perspective of which controlvm messages have
439 * been received for a bus or device.
440 */
441
442enum PARSER_WHICH_STRING {
443 PARSERSTRING_INITIATOR,
444 PARSERSTRING_TARGET,
445 PARSERSTRING_CONNECTION,
446 PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
447};
448
464129ed 449static void
2ee0deec
PB
450parser_param_start(struct parser_context *ctx,
451 enum PARSER_WHICH_STRING which_string)
46168810
EA
452{
453 struct spar_controlvm_parameters_header *phdr = NULL;
454
e4a3dd33 455 if (!ctx)
b4d4dfbc
BR
456 return;
457
46168810
EA
458 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
459 switch (which_string) {
460 case PARSERSTRING_INITIATOR:
461 ctx->curr = ctx->data + phdr->initiator_offset;
462 ctx->bytes_remaining = phdr->initiator_length;
463 break;
464 case PARSERSTRING_TARGET:
465 ctx->curr = ctx->data + phdr->target_offset;
466 ctx->bytes_remaining = phdr->target_length;
467 break;
468 case PARSERSTRING_CONNECTION:
469 ctx->curr = ctx->data + phdr->connection_offset;
470 ctx->bytes_remaining = phdr->connection_length;
471 break;
472 case PARSERSTRING_NAME:
473 ctx->curr = ctx->data + phdr->name_offset;
474 ctx->bytes_remaining = phdr->name_length;
475 break;
476 default:
477 break;
478 }
46168810
EA
479}
480
464129ed 481static void parser_done(struct parser_context *ctx)
46168810
EA
482{
483 if (!ctx)
484 return;
485 controlvm_payload_bytes_buffered -= ctx->param_bytes;
486 kfree(ctx);
487}
488
464129ed 489static void *
46168810
EA
490parser_string_get(struct parser_context *ctx)
491{
492 u8 *pscan;
493 unsigned long nscan;
494 int value_length = -1;
495 void *value = NULL;
496 int i;
497
498 if (!ctx)
499 return NULL;
500 pscan = ctx->curr;
501 nscan = ctx->bytes_remaining;
502 if (nscan == 0)
503 return NULL;
504 if (!pscan)
505 return NULL;
506 for (i = 0, value_length = -1; i < nscan; i++)
507 if (pscan[i] == '\0') {
508 value_length = i;
509 break;
510 }
511 if (value_length < 0) /* '\0' was not included in the length */
512 value_length = nscan;
8c395e74 513 value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
e4a3dd33 514 if (!value)
46168810
EA
515 return NULL;
516 if (value_length > 0)
517 memcpy(value, pscan, value_length);
0e7bf2f4 518 ((u8 *)(value))[value_length] = '\0';
46168810
EA
519 return value;
520}
521
d746cb55
VB
522static ssize_t toolaction_show(struct device *dev,
523 struct device_attribute *attr,
524 char *buf)
19f6634f 525{
01f4d85a 526 u8 tool_action;
19f6634f 527
c3d9a224 528 visorchannel_read(controlvm_channel,
6bb871b6
BR
529 offsetof(struct spar_controlvm_channel_protocol,
530 tool_action), &tool_action, sizeof(u8));
01f4d85a 531 return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
19f6634f
BR
532}
533
d746cb55
VB
534static ssize_t toolaction_store(struct device *dev,
535 struct device_attribute *attr,
536 const char *buf, size_t count)
19f6634f 537{
01f4d85a 538 u8 tool_action;
66e24b76 539 int ret;
19f6634f 540
ebec8967 541 if (kstrtou8(buf, 10, &tool_action))
66e24b76
BR
542 return -EINVAL;
543
c3d9a224 544 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
545 offsetof(struct spar_controlvm_channel_protocol,
546 tool_action),
01f4d85a 547 &tool_action, sizeof(u8));
66e24b76
BR
548
549 if (ret)
550 return ret;
e22a4a0f 551 return count;
19f6634f
BR
552}
553
d746cb55
VB
554static ssize_t boottotool_show(struct device *dev,
555 struct device_attribute *attr,
556 char *buf)
54b31229 557{
365522d9 558 struct efi_spar_indication efi_spar_indication;
54b31229 559
c3d9a224 560 visorchannel_read(controlvm_channel,
8e76e695
BR
561 offsetof(struct spar_controlvm_channel_protocol,
562 efi_spar_ind), &efi_spar_indication,
563 sizeof(struct efi_spar_indication));
54b31229 564 return scnprintf(buf, PAGE_SIZE, "%u\n",
8e76e695 565 efi_spar_indication.boot_to_tool);
54b31229
BR
566}
567
d746cb55
VB
568static ssize_t boottotool_store(struct device *dev,
569 struct device_attribute *attr,
570 const char *buf, size_t count)
54b31229 571{
66e24b76 572 int val, ret;
365522d9 573 struct efi_spar_indication efi_spar_indication;
54b31229 574
ebec8967 575 if (kstrtoint(buf, 10, &val))
66e24b76
BR
576 return -EINVAL;
577
365522d9 578 efi_spar_indication.boot_to_tool = val;
c3d9a224 579 ret = visorchannel_write(controlvm_channel,
d19642f6 580 offsetof(struct spar_controlvm_channel_protocol,
8e76e695
BR
581 efi_spar_ind), &(efi_spar_indication),
582 sizeof(struct efi_spar_indication));
66e24b76
BR
583
584 if (ret)
585 return ret;
e22a4a0f 586 return count;
54b31229 587}
422af17c
BR
588
589static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8e76e695 590 char *buf)
422af17c
BR
591{
592 u32 error;
593
8e76e695
BR
594 visorchannel_read(controlvm_channel,
595 offsetof(struct spar_controlvm_channel_protocol,
596 installation_error),
597 &error, sizeof(u32));
422af17c
BR
598 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
599}
600
601static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8e76e695 602 const char *buf, size_t count)
422af17c
BR
603{
604 u32 error;
66e24b76 605 int ret;
422af17c 606
ebec8967 607 if (kstrtou32(buf, 10, &error))
66e24b76
BR
608 return -EINVAL;
609
c3d9a224 610 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
611 offsetof(struct spar_controlvm_channel_protocol,
612 installation_error),
613 &error, sizeof(u32));
66e24b76
BR
614 if (ret)
615 return ret;
e22a4a0f 616 return count;
422af17c
BR
617}
618
619static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
8e76e695 620 char *buf)
422af17c 621{
10dbf0e3 622 u32 text_id;
422af17c 623
8e76e695
BR
624 visorchannel_read(controlvm_channel,
625 offsetof(struct spar_controlvm_channel_protocol,
626 installation_text_id),
627 &text_id, sizeof(u32));
10dbf0e3 628 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
422af17c
BR
629}
630
631static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
8e76e695 632 const char *buf, size_t count)
422af17c 633{
10dbf0e3 634 u32 text_id;
66e24b76 635 int ret;
422af17c 636
ebec8967 637 if (kstrtou32(buf, 10, &text_id))
66e24b76
BR
638 return -EINVAL;
639
c3d9a224 640 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
641 offsetof(struct spar_controlvm_channel_protocol,
642 installation_text_id),
643 &text_id, sizeof(u32));
66e24b76
BR
644 if (ret)
645 return ret;
e22a4a0f 646 return count;
422af17c
BR
647}
648
422af17c 649static ssize_t remaining_steps_show(struct device *dev,
8e76e695 650 struct device_attribute *attr, char *buf)
422af17c 651{
ee8da290 652 u16 remaining_steps;
422af17c 653
c3d9a224 654 visorchannel_read(controlvm_channel,
8e76e695
BR
655 offsetof(struct spar_controlvm_channel_protocol,
656 installation_remaining_steps),
657 &remaining_steps, sizeof(u16));
ee8da290 658 return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
422af17c
BR
659}
660
661static ssize_t remaining_steps_store(struct device *dev,
8e76e695
BR
662 struct device_attribute *attr,
663 const char *buf, size_t count)
422af17c 664{
ee8da290 665 u16 remaining_steps;
66e24b76 666 int ret;
422af17c 667
ebec8967 668 if (kstrtou16(buf, 10, &remaining_steps))
66e24b76
BR
669 return -EINVAL;
670
c3d9a224 671 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
672 offsetof(struct spar_controlvm_channel_protocol,
673 installation_remaining_steps),
674 &remaining_steps, sizeof(u16));
66e24b76
BR
675 if (ret)
676 return ret;
e22a4a0f 677 return count;
422af17c
BR
678}
679
ab0592b9
DZ
/* Lookup key for finding a visor_device by its (bus, device) number pair;
 * passed to bus_find_device() via match_visorbus_dev_by_id().
 */
struct visor_busdev {
	u32 bus_no;
	u32 dev_no;
};
684
685static int match_visorbus_dev_by_id(struct device *dev, void *data)
686{
687 struct visor_device *vdev = to_visor_device(dev);
7f44582e 688 struct visor_busdev *id = data;
ab0592b9
DZ
689 u32 bus_no = id->bus_no;
690 u32 dev_no = id->dev_no;
691
65bd6e46
DZ
692 if ((vdev->chipset_bus_no == bus_no) &&
693 (vdev->chipset_dev_no == dev_no))
ab0592b9
DZ
694 return 1;
695
696 return 0;
697}
d1e08637 698
ab0592b9
DZ
699struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
700 struct visor_device *from)
701{
702 struct device *dev;
703 struct device *dev_start = NULL;
704 struct visor_device *vdev = NULL;
705 struct visor_busdev id = {
706 .bus_no = bus_no,
707 .dev_no = dev_no
708 };
709
710 if (from)
711 dev_start = &from->device;
712 dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
713 match_visorbus_dev_by_id);
714 if (dev)
715 vdev = to_visor_device(dev);
716 return vdev;
717}
718EXPORT_SYMBOL(visorbus_get_device_by_id);
719
c242233e 720static u8
12e364b9
KC
721check_chipset_events(void)
722{
723 int i;
c242233e 724 u8 send_msg = 1;
12e364b9
KC
725 /* Check events to determine if response should be sent */
726 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
727 send_msg &= chipset_events[i];
728 return send_msg;
729}
730
731static void
732clear_chipset_events(void)
733{
734 int i;
735 /* Clear chipset_events */
736 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
737 chipset_events[i] = 0;
738}
739
740void
4da3336c 741visorchipset_register_busdev(
fe90d892 742 struct visorchipset_busdev_notifiers *notifiers,
929aa8ae 743 struct visorchipset_busdev_responders *responders,
1e7a59c1 744 struct ultra_vbus_deviceinfo *driver_info)
12e364b9 745{
8f1947ac 746 down(&notifier_lock);
38f736e9 747 if (!notifiers) {
4da3336c
DK
748 memset(&busdev_notifiers, 0,
749 sizeof(busdev_notifiers));
750 visorbusregistered = 0; /* clear flag */
12e364b9 751 } else {
4da3336c
DK
752 busdev_notifiers = *notifiers;
753 visorbusregistered = 1; /* set flag */
12e364b9
KC
754 }
755 if (responders)
8e3fedd6 756 *responders = busdev_responders;
1e7a59c1
BR
757 if (driver_info)
758 bus_device_info_init(driver_info, "chipset", "visorchipset",
8e76e695 759 VERSION, NULL);
12e364b9 760
8f1947ac 761 up(&notifier_lock);
12e364b9 762}
4da3336c 763EXPORT_SYMBOL_GPL(visorchipset_register_busdev);
12e364b9 764
12e364b9 765static void
3ab47701 766chipset_init(struct controlvm_message *inmsg)
12e364b9
KC
767{
768 static int chipset_inited;
b9b141e8 769 enum ultra_chipset_feature features = 0;
12e364b9
KC
770 int rc = CONTROLVM_RESP_SUCCESS;
771
772 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
773 if (chipset_inited) {
22ad57ba 774 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
e3199b2e 775 goto cleanup;
12e364b9
KC
776 }
777 chipset_inited = 1;
778 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
779
780 /* Set features to indicate we support parahotplug (if Command
2ee0d052
EA
781 * also supports it).
782 */
12e364b9 783 features =
2ea5117b 784 inmsg->cmd.init_chipset.
12e364b9
KC
785 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
786
787 /* Set the "reply" bit so Command knows this is a
2ee0d052
EA
788 * features-aware driver.
789 */
12e364b9
KC
790 features |= ULTRA_CHIPSET_FEATURE_REPLY;
791
e3199b2e 792cleanup:
98d7b594 793 if (inmsg->hdr.flags.response_expected)
12e364b9
KC
794 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
795}
796
/*
 * controlvm_init_response() - build a response message in *@msg: copy the
 * request header from @msg_hdr, strip any payload description, and mark
 * the message failed (with |response| as completion status) when
 * @response is negative.
 */
static void
controlvm_init_response(struct controlvm_message *msg,
			struct controlvm_message_header *msg_hdr, int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
	/* A response carries no payload of its own. */
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		/* completion_status is the positive error magnitude */
		msg->hdr.completion_status = (u32)(-response);
	}
}
811
812static void
b3168c70 813controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
12e364b9 814{
3ab47701 815 struct controlvm_message outmsg;
26eb2c0c 816
b3168c70 817 controlvm_init_response(&outmsg, msg_hdr, response);
2098dbd1 818 if (outmsg.hdr.flags.test_message == 1)
12e364b9 819 return;
2098dbd1 820
c3d9a224 821 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 822 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
823 return;
824 }
825}
826
827static void
b3168c70 828controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
98d7b594 829 int response,
b9b141e8 830 enum ultra_chipset_feature features)
12e364b9 831{
3ab47701 832 struct controlvm_message outmsg;
26eb2c0c 833
b3168c70 834 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b 835 outmsg.cmd.init_chipset.features = features;
c3d9a224 836 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 837 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
838 return;
839 }
840}
841
98d7b594 842static void controlvm_respond_physdev_changestate(
b3168c70 843 struct controlvm_message_header *msg_hdr, int response,
98d7b594 844 struct spar_segment_state state)
12e364b9 845{
3ab47701 846 struct controlvm_message outmsg;
26eb2c0c 847
b3168c70 848 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b
BR
849 outmsg.cmd.device_change_state.state = state;
850 outmsg.cmd.device_change_state.flags.phys_device = 1;
c3d9a224 851 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 852 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
853 return;
854 }
855}
856
2ee0deec
PB
/* Selects which saved-crash-message slot in the controlvm channel to use:
 * CRASH_BUS writes the first slot, CRASH_DEV the second (see
 * save_crash_message()).
 */
enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};
861
12c957dc
TS
/*
 * save_crash_message() - stash @msg into the controlvm channel's saved
 * crash-message area so it can be replayed after a kdump.  @typ selects
 * the slot: CRASH_BUS uses the first controlvm_message-sized slot at the
 * saved offset, anything else (CRASH_DEV) the one immediately after it.
 * Errors are reported via POSTCODE and the message is simply not saved.
 */
static void
save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
{
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	/* Read how many crash-message slots the channel provides. */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* The channel must advertise exactly the expected slot count. */
	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 local_crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* Locate the start of the saved-crash-message area. */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (typ == CRASH_BUS) {
		/* Bus messages occupy the first slot. */
		if (visorchannel_write(controlvm_channel,
				       local_crash_msg_offset,
				       msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	} else {
		/* Device messages occupy the slot after the bus slot. */
		local_crash_msg_offset += sizeof(struct controlvm_message);
		if (visorchannel_write(controlvm_channel,
				       local_crash_msg_offset,
				       msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	}
}
914
12e364b9 915static void
0274b5ae
DZ
916bus_responder(enum controlvm_id cmd_id,
917 struct controlvm_message_header *pending_msg_hdr,
3032aedd 918 int response)
12e364b9 919{
e4a3dd33 920 if (!pending_msg_hdr)
0274b5ae 921 return; /* no controlvm response needed */
12e364b9 922
0274b5ae 923 if (pending_msg_hdr->id != (u32)cmd_id)
12e364b9 924 return;
0aca7844 925
0274b5ae 926 controlvm_respond(pending_msg_hdr, response);
12e364b9
KC
927}
928
929static void
fbb31f48 930device_changestate_responder(enum controlvm_id cmd_id,
a298bc0b 931 struct visor_device *p, int response,
fbb31f48 932 struct spar_segment_state response_state)
12e364b9 933{
3ab47701 934 struct controlvm_message outmsg;
a298bc0b
DZ
935 u32 bus_no = p->chipset_bus_no;
936 u32 dev_no = p->chipset_dev_no;
12e364b9 937
e4a3dd33 938 if (!p->pending_msg_hdr)
12e364b9 939 return; /* no controlvm response needed */
0274b5ae 940 if (p->pending_msg_hdr->id != cmd_id)
12e364b9 941 return;
12e364b9 942
0274b5ae 943 controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
12e364b9 944
fbb31f48
BR
945 outmsg.cmd.device_change_state.bus_no = bus_no;
946 outmsg.cmd.device_change_state.dev_no = dev_no;
947 outmsg.cmd.device_change_state.state = response_state;
12e364b9 948
c3d9a224 949 if (!visorchannel_signalinsert(controlvm_channel,
0aca7844 950 CONTROLVM_QUEUE_REQUEST, &outmsg))
12e364b9 951 return;
12e364b9
KC
952}
953
954static void
0274b5ae
DZ
955device_responder(enum controlvm_id cmd_id,
956 struct controlvm_message_header *pending_msg_hdr,
b4b598fd 957 int response)
12e364b9 958{
e4a3dd33 959 if (!pending_msg_hdr)
12e364b9 960 return; /* no controlvm response needed */
0aca7844 961
0274b5ae 962 if (pending_msg_hdr->id != (u32)cmd_id)
12e364b9 963 return;
0aca7844 964
0274b5ae 965 controlvm_respond(pending_msg_hdr, response);
12e364b9
KC
966}
967
/* Common tail for every bus command (create/destroy/configure): record the
 * pending response header on the bus, dispatch to the registered busdev
 * notifier under notifier_lock, and respond directly when no notifier was
 * invoked.  When a notifier IS invoked, responding (and freeing pmsg_hdr)
 * becomes the notifier's responsibility via the responder callbacks.
 */
static void
bus_epilog(struct visor_device *bus_info,
	   u32 cmd, struct controlvm_message_header *msg_hdr,
	   int response, bool need_response)
{
	bool notified = false;
	struct controlvm_message_header *pmsg_hdr = NULL;

	if (!bus_info) {
		/* relying on a valid passed in response code */
		/* be lazy and re-use msg_hdr for this failure, is this ok?? */
		pmsg_hdr = msg_hdr;
		goto away;
	}

	if (bus_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		pmsg_hdr = bus_info->pending_msg_hdr;
		goto away;
	}

	if (need_response) {
		/* copy the request header; the bus now owns this pending
		 * response until a responder answers it
		 */
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
			goto away;
		}

		memcpy(pmsg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}

	down(&notifier_lock);
	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			if (busdev_notifiers.bus_create) {
				(*busdev_notifiers.bus_create) (bus_info);
				notified = true;
			}
			break;
		case CONTROLVM_BUS_DESTROY:
			if (busdev_notifiers.bus_destroy) {
				(*busdev_notifiers.bus_destroy) (bus_info);
				notified = true;
			}
			break;
		}
	}
away:
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call bus_responder()
		 */
		;
	else
		/*
		 * Do not kfree(pmsg_hdr) as this is the failure path.
		 * The success path ('notified') will call the responder
		 * directly and kfree() there.
		 */
		bus_responder(cmd, pmsg_hdr, response);
	up(&notifier_lock);
}
1035
/* Common tail for every device command: record the pending response header
 * on the device, then dispatch create/changestate/destroy to the busdev
 * notifiers under notifier_lock.  For CHANGESTATE, the target segment state
 * selects device_resume (running) or device_pause (standby).  When no
 * notifier ran, respond directly here; otherwise the notifier's responder
 * answers and frees pmsg_hdr.
 * NOTE(review): the 'for_visorbus' parameter is not referenced in this body.
 */
static void
device_epilog(struct visor_device *dev_info,
	      struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msg_hdr, int response,
	      bool need_response, bool for_visorbus)
{
	struct visorchipset_busdev_notifiers *notifiers;
	bool notified = false;
	struct controlvm_message_header *pmsg_hdr = NULL;

	notifiers = &busdev_notifiers;

	if (!dev_info) {
		/* relying on a valid passed in response code */
		/* be lazy and re-use msg_hdr for this failure, is this ok?? */
		pmsg_hdr = msg_hdr;
		goto away;
	}

	if (dev_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		pmsg_hdr = dev_info->pending_msg_hdr;
		goto away;
	}

	if (need_response) {
		/* copy the request header; the device owns this pending
		 * response until a responder answers it
		 */
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
			goto away;
		}

		memcpy(pmsg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}

	down(&notifier_lock);
	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			if (notifiers->device_create) {
				(*notifiers->device_create) (dev_info);
				notified = true;
			}
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
				segment_state_running.operating) {
				if (notifiers->device_resume) {
					(*notifiers->device_resume) (dev_info);
					notified = true;
				}
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/* technically this is standby case
				 * where server is lost
				 */
				if (notifiers->device_pause) {
					(*notifiers->device_pause) (dev_info);
					notified = true;
				}
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			if (notifiers->device_destroy) {
				(*notifiers->device_destroy) (dev_info);
				notified = true;
			}
			break;
		}
	}
away:
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call device_responder()
		 */
		;
	else
		/*
		 * Do not kfree(pmsg_hdr) as this is the failure path.
		 * The success path ('notified') will call the responder
		 * directly and kfree() there.
		 */
		device_responder(cmd, pmsg_hdr, response);
	up(&notifier_lock);
}
1130
/* Handle a CONTROLVM_BUS_CREATE message: allocate a visor_device for the
 * bus, attach its controlvm-described channel, and finish (success or
 * failure) through bus_epilog(), which owns the response to Command.
 */
static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_bus.bus_no;
	int rc = CONTROLVM_RESP_SUCCESS;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;

	/* duplicate create for a bus that already exists is an error */
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&bus_info->list_all);
	bus_info->chipset_bus_no = bus_no;
	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;

	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	/* map the channel described by the message */
	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
					   cmd->create_bus.channel_bytes,
					   GFP_KERNEL,
					   cmd->create_bus.bus_data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		kfree(bus_info);
		bus_info = NULL;
		goto cleanup;
	}
	bus_info->visorchannel = visorchannel;
	/* the s-Par IO VM bus is remembered for crash-dump handling */
	if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) {
		dump_vhba_bus = bus_no;
		save_crash_message(inmsg, CRASH_BUS);
	}

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
	bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}
1186
1187static void
3ab47701 1188bus_destroy(struct controlvm_message *inmsg)
12e364b9 1189{
2ea5117b 1190 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca 1191 u32 bus_no = cmd->destroy_bus.bus_no;
d32517e3 1192 struct visor_device *bus_info;
12e364b9
KC
1193 int rc = CONTROLVM_RESP_SUCCESS;
1194
d32517e3 1195 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
dff54cd6 1196 if (!bus_info)
22ad57ba 1197 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
dff54cd6 1198 else if (bus_info->state.created == 0)
22ad57ba 1199 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1200
3032aedd 1201 bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
98d7b594 1202 rc, inmsg->hdr.flags.response_expected == 1);
d32517e3
DZ
1203
1204 /* bus_info is freed as part of the busdevice_release function */
12e364b9
KC
1205}
1206
1207static void
317d9614
BR
1208bus_configure(struct controlvm_message *inmsg,
1209 struct parser_context *parser_ctx)
12e364b9 1210{
2ea5117b 1211 struct controlvm_message_packet *cmd = &inmsg->cmd;
e82ba62e 1212 u32 bus_no;
d32517e3 1213 struct visor_device *bus_info;
12e364b9 1214 int rc = CONTROLVM_RESP_SUCCESS;
12e364b9 1215
654bada0
BR
1216 bus_no = cmd->configure_bus.bus_no;
1217 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
1218 POSTCODE_SEVERITY_INFO);
12e364b9 1219
d32517e3 1220 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
654bada0
BR
1221 if (!bus_info) {
1222 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1223 POSTCODE_SEVERITY_ERR);
22ad57ba 1224 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
654bada0
BR
1225 } else if (bus_info->state.created == 0) {
1226 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1227 POSTCODE_SEVERITY_ERR);
22ad57ba 1228 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
e4a3dd33 1229 } else if (bus_info->pending_msg_hdr) {
654bada0 1230 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1231 POSTCODE_SEVERITY_ERR);
22ad57ba 1232 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
654bada0 1233 } else {
b32c4997
DZ
1234 visorchannel_set_clientpartition(bus_info->visorchannel,
1235 cmd->configure_bus.guest_handle);
654bada0
BR
1236 bus_info->partition_uuid = parser_id_get(parser_ctx);
1237 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1238 bus_info->name = parser_string_get(parser_ctx);
1239
654bada0
BR
1240 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
1241 POSTCODE_SEVERITY_INFO);
12e364b9 1242 }
3032aedd 1243 bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
98d7b594 1244 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1245}
1246
/* Handle a CONTROLVM_DEVICE_CREATE message: validate the owning bus,
 * allocate a visor_device for the new device, attach its (spinlock-guarded)
 * channel, and finish through device_epilog(), which owns the response.
 */
static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visor_device *dev_info = NULL;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int rc = CONTROLVM_RESP_SUCCESS;

	/* the device must live on a known, fully-created bus */
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}

	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}

	/* duplicate create for an existing device is an error */
	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}

	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	dev_info->chipset_bus_no = bus_no;
	dev_info->chipset_dev_no = dev_no;
	dev_info->inst = cmd->create_device.dev_inst_uuid;

	/* not sure where the best place to set the 'parent' */
	dev_info->device.parent = &bus_info->device;

	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	/* map the device channel described by the message */
	visorchannel =
	       visorchannel_create_with_lock(cmd->create_device.channel_addr,
					     cmd->create_device.channel_bytes,
					     GFP_KERNEL,
					     cmd->create_device.data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		kfree(dev_info);
		dev_info = NULL;
		goto cleanup;
	}
	dev_info->visorchannel = visorchannel;
	dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
	/* vHBA devices are remembered for crash-dump handling */
	if (uuid_le_cmp(cmd->create_device.data_type_uuid,
			spar_vhba_channel_protocol_uuid) == 0)
		save_crash_message(inmsg, CRASH_DEV);

	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
cleanup:
	device_epilog(dev_info, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1, 1);
}
1326
1327static void
3ab47701 1328my_device_changestate(struct controlvm_message *inmsg)
12e364b9 1329{
2ea5117b 1330 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1331 u32 bus_no = cmd->device_change_state.bus_no;
1332 u32 dev_no = cmd->device_change_state.dev_no;
2ea5117b 1333 struct spar_segment_state state = cmd->device_change_state.state;
a298bc0b 1334 struct visor_device *dev_info;
12e364b9
KC
1335 int rc = CONTROLVM_RESP_SUCCESS;
1336
a298bc0b 1337 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
0278a905
BR
1338 if (!dev_info) {
1339 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1340 POSTCODE_SEVERITY_ERR);
22ad57ba 1341 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
0278a905
BR
1342 } else if (dev_info->state.created == 0) {
1343 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1344 POSTCODE_SEVERITY_ERR);
22ad57ba 1345 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
12e364b9 1346 }
0278a905 1347 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
b4b598fd 1348 device_epilog(dev_info, state,
0278a905 1349 CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
4da3336c 1350 inmsg->hdr.flags.response_expected == 1, 1);
12e364b9
KC
1351}
1352
1353static void
3ab47701 1354my_device_destroy(struct controlvm_message *inmsg)
12e364b9 1355{
2ea5117b 1356 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1357 u32 bus_no = cmd->destroy_device.bus_no;
1358 u32 dev_no = cmd->destroy_device.dev_no;
a298bc0b 1359 struct visor_device *dev_info;
12e364b9
KC
1360 int rc = CONTROLVM_RESP_SUCCESS;
1361
a298bc0b 1362 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
61715c8b 1363 if (!dev_info)
22ad57ba 1364 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
61715c8b 1365 else if (dev_info->state.created == 0)
22ad57ba 1366 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1367
61715c8b 1368 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
b4b598fd 1369 device_epilog(dev_info, segment_state_running,
12e364b9 1370 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
4da3336c 1371 inmsg->hdr.flags.response_expected == 1, 1);
12e364b9
KC
1372}
1373
1374/* When provided with the physical address of the controlvm channel
1375 * (phys_addr), the offset to the payload area we need to manage
1376 * (offset), and the size of this payload area (bytes), fills in the
f4c11551 1377 * controlvm_payload_info struct. Returns true for success or false
12e364b9
KC
1378 * for failure.
1379 */
1380static int
d5b3f1dc 1381initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
c1f834eb 1382 struct visor_controlvm_payload_info *info)
12e364b9 1383{
3103dc03 1384 u8 *payload = NULL;
12e364b9
KC
1385 int rc = CONTROLVM_RESP_SUCCESS;
1386
38f736e9 1387 if (!info) {
22ad57ba 1388 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
f118a39b 1389 goto cleanup;
12e364b9 1390 }
c1f834eb 1391 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
12e364b9 1392 if ((offset == 0) || (bytes == 0)) {
22ad57ba 1393 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
f118a39b 1394 goto cleanup;
12e364b9 1395 }
3103dc03 1396 payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB);
38f736e9 1397 if (!payload) {
22ad57ba 1398 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
f118a39b 1399 goto cleanup;
12e364b9
KC
1400 }
1401
1402 info->offset = offset;
1403 info->bytes = bytes;
1404 info->ptr = payload;
12e364b9 1405
f118a39b 1406cleanup:
12e364b9 1407 if (rc < 0) {
f118a39b 1408 if (payload) {
3103dc03 1409 memunmap(payload);
12e364b9
KC
1410 payload = NULL;
1411 }
1412 }
1413 return rc;
1414}
1415
1416static void
c1f834eb 1417destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
12e364b9 1418{
597c338f 1419 if (info->ptr) {
3103dc03 1420 memunmap(info->ptr);
12e364b9
KC
1421 info->ptr = NULL;
1422 }
c1f834eb 1423 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
12e364b9
KC
1424}
1425
1426static void
1427initialize_controlvm_payload(void)
1428{
d5b3f1dc 1429 u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
cafefc0c
BR
1430 u64 payload_offset = 0;
1431 u32 payload_bytes = 0;
26eb2c0c 1432
c3d9a224 1433 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1434 offsetof(struct spar_controlvm_channel_protocol,
1435 request_payload_offset),
cafefc0c 1436 &payload_offset, sizeof(payload_offset)) < 0) {
12e364b9
KC
1437 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1438 POSTCODE_SEVERITY_ERR);
1439 return;
1440 }
c3d9a224 1441 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1442 offsetof(struct spar_controlvm_channel_protocol,
1443 request_payload_bytes),
cafefc0c 1444 &payload_bytes, sizeof(payload_bytes)) < 0) {
12e364b9
KC
1445 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1446 POSTCODE_SEVERITY_ERR);
1447 return;
1448 }
1449 initialize_controlvm_payload_info(phys_addr,
cafefc0c 1450 payload_offset, payload_bytes,
84982fbf 1451 &controlvm_payload_info);
12e364b9
KC
1452}
1453
1454/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1455 * Returns CONTROLVM_RESP_xxx code.
1456 */
d3368a58 1457static int
12e364b9
KC
1458visorchipset_chipset_ready(void)
1459{
eb34e877 1460 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
12e364b9
KC
1461 return CONTROLVM_RESP_SUCCESS;
1462}
12e364b9 1463
d3368a58 1464static int
12e364b9
KC
1465visorchipset_chipset_selftest(void)
1466{
1467 char env_selftest[20];
1468 char *envp[] = { env_selftest, NULL };
26eb2c0c 1469
12e364b9 1470 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
eb34e877 1471 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
12e364b9
KC
1472 envp);
1473 return CONTROLVM_RESP_SUCCESS;
1474}
12e364b9
KC
1475
1476/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1477 * Returns CONTROLVM_RESP_xxx code.
1478 */
d3368a58 1479static int
12e364b9
KC
1480visorchipset_chipset_notready(void)
1481{
eb34e877 1482 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
12e364b9
KC
1483 return CONTROLVM_RESP_SUCCESS;
1484}
12e364b9
KC
1485
1486static void
77a0449d 1487chipset_ready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1488{
1489 int rc = visorchipset_chipset_ready();
26eb2c0c 1490
12e364b9
KC
1491 if (rc != CONTROLVM_RESP_SUCCESS)
1492 rc = -rc;
77a0449d
BR
1493 if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
1494 controlvm_respond(msg_hdr, rc);
1495 if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
12e364b9
KC
1496 /* Send CHIPSET_READY response when all modules have been loaded
1497 * and disks mounted for the partition
1498 */
77a0449d 1499 g_chipset_msg_hdr = *msg_hdr;
12e364b9
KC
1500 }
1501}
1502
1503static void
77a0449d 1504chipset_selftest(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1505{
1506 int rc = visorchipset_chipset_selftest();
26eb2c0c 1507
12e364b9
KC
1508 if (rc != CONTROLVM_RESP_SUCCESS)
1509 rc = -rc;
77a0449d
BR
1510 if (msg_hdr->flags.response_expected)
1511 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1512}
1513
1514static void
77a0449d 1515chipset_notready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1516{
1517 int rc = visorchipset_chipset_notready();
26eb2c0c 1518
12e364b9
KC
1519 if (rc != CONTROLVM_RESP_SUCCESS)
1520 rc = -rc;
77a0449d
BR
1521 if (msg_hdr->flags.response_expected)
1522 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1523}
1524
1525/* This is your "one-stop" shop for grabbing the next message from the
1526 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1527 */
f4c11551 1528static bool
3ab47701 1529read_controlvm_event(struct controlvm_message *msg)
12e364b9 1530{
c3d9a224 1531 if (visorchannel_signalremove(controlvm_channel,
12e364b9
KC
1532 CONTROLVM_QUEUE_EVENT, msg)) {
1533 /* got a message */
0aca7844 1534 if (msg->hdr.flags.test_message == 1)
f4c11551
JS
1535 return false;
1536 return true;
12e364b9 1537 }
f4c11551 1538 return false;
12e364b9
KC
1539}
1540
1541/*
1542 * The general parahotplug flow works as follows. The visorchipset
1543 * driver receives a DEVICE_CHANGESTATE message from Command
1544 * specifying a physical device to enable or disable. The CONTROLVM
1545 * message handler calls parahotplug_process_message, which then adds
1546 * the message to a global list and kicks off a udev event which
1547 * causes a user level script to enable or disable the specified
1548 * device. The udev script then writes to
1549 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1550 * to get called, at which point the appropriate CONTROLVM message is
1551 * retrieved from the list and responded to.
1552 */
1553
1554#define PARAHOTPLUG_TIMEOUT_MS 2000
1555
1556/*
1557 * Generate unique int to match an outstanding CONTROLVM message with a
1558 * udev script /proc response
1559 */
1560static int
1561parahotplug_next_id(void)
1562{
1563 static atomic_t id = ATOMIC_INIT(0);
26eb2c0c 1564
12e364b9
KC
1565 return atomic_inc_return(&id);
1566}
1567
1568/*
1569 * Returns the time (in jiffies) when a CONTROLVM message on the list
1570 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1571 */
1572static unsigned long
1573parahotplug_next_expiration(void)
1574{
2cc1a1b3 1575 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
12e364b9
KC
1576}
1577
1578/*
1579 * Create a parahotplug_request, which is basically a wrapper for a
1580 * CONTROLVM_MESSAGE that we can stick on a list
1581 */
1582static struct parahotplug_request *
3ab47701 1583parahotplug_request_create(struct controlvm_message *msg)
12e364b9 1584{
ea0dcfcf
QL
1585 struct parahotplug_request *req;
1586
6a55e3c3 1587 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
38f736e9 1588 if (!req)
12e364b9
KC
1589 return NULL;
1590
1591 req->id = parahotplug_next_id();
1592 req->expiration = parahotplug_next_expiration();
1593 req->msg = *msg;
1594
1595 return req;
1596}
1597
/*
 * Free a parahotplug_request previously allocated by
 * parahotplug_request_create().
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}
1606
/*
 * Cause uevent to run the user level script to do the disable/enable
 * specified in (the CONTROLVM message in) the specified
 * parahotplug_request.  The request id is passed along so the script's
 * /proc write can later be matched back to this request.
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	    env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	/* dev_no packs PCI device (upper bits) and function (low 3 bits) */
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}
1636
1637/*
1638 * Remove any request from the list that's been on there too long and
1639 * respond with an error.
1640 */
1641static void
1642parahotplug_process_list(void)
1643{
e82ba62e
JS
1644 struct list_head *pos;
1645 struct list_head *tmp;
12e364b9 1646
ddf5de53 1647 spin_lock(&parahotplug_request_list_lock);
12e364b9 1648
ddf5de53 1649 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
1650 struct parahotplug_request *req =
1651 list_entry(pos, struct parahotplug_request, list);
55b33413
BR
1652
1653 if (!time_after_eq(jiffies, req->expiration))
1654 continue;
1655
1656 list_del(pos);
1657 if (req->msg.hdr.flags.response_expected)
1658 controlvm_respond_physdev_changestate(
1659 &req->msg.hdr,
1660 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1661 req->msg.cmd.device_change_state.state);
1662 parahotplug_request_destroy(req);
12e364b9
KC
1663 }
1664
ddf5de53 1665 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1666}
1667
1668/*
1669 * Called from the /proc handler, which means the user script has
1670 * finished the enable/disable. Find the matching identifier, and
1671 * respond to the CONTROLVM message with success.
1672 */
1673static int
b06bdf7d 1674parahotplug_request_complete(int id, u16 active)
12e364b9 1675{
e82ba62e
JS
1676 struct list_head *pos;
1677 struct list_head *tmp;
12e364b9 1678
ddf5de53 1679 spin_lock(&parahotplug_request_list_lock);
12e364b9
KC
1680
1681 /* Look for a request matching "id". */
ddf5de53 1682 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
1683 struct parahotplug_request *req =
1684 list_entry(pos, struct parahotplug_request, list);
1685 if (req->id == id) {
1686 /* Found a match. Remove it from the list and
1687 * respond.
1688 */
1689 list_del(pos);
ddf5de53 1690 spin_unlock(&parahotplug_request_list_lock);
2ea5117b 1691 req->msg.cmd.device_change_state.state.active = active;
98d7b594 1692 if (req->msg.hdr.flags.response_expected)
12e364b9
KC
1693 controlvm_respond_physdev_changestate(
1694 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
2ea5117b 1695 req->msg.cmd.device_change_state.state);
12e364b9
KC
1696 parahotplug_request_destroy(req);
1697 return 0;
1698 }
1699 }
1700
ddf5de53 1701 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1702 return -1;
1703}
1704
/*
 * Enables or disables a PCI device by kicking off a udev script.
 * Enable requests are answered immediately; disable requests stay queued
 * on parahotplug_request_list until the udev script reports completion
 * via parahotplug_request_complete().
 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
	struct parahotplug_request *req;

	req = parahotplug_request_create(inmsg);

	/* allocation failed: the message is silently dropped */
	if (!req)
		return;

	if (inmsg->cmd.device_change_state.state.active) {
		/* For enable messages, just respond with success
		 * right away. This is a bit of a hack, but there are
		 * issues with the early enable messages we get (with
		 * either the udev script not detecting that the device
		 * is up, or not getting called at all). Fortunately
		 * the messages that get lost don't matter anyway, as
		 * devices are automatically enabled at
		 * initialization.
		 */
		parahotplug_request_kickoff(req);
		controlvm_respond_physdev_changestate(&inmsg->hdr,
						      CONTROLVM_RESP_SUCCESS,
						      inmsg->cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	} else {
		/* For disable messages, add the request to the
		 * request list before kicking off the udev script. It
		 * won't get responded to until the script has
		 * indicated it's done.
		 */
		spin_lock(&parahotplug_request_list_lock);
		list_add_tail(&req->list, &parahotplug_request_list);
		spin_unlock(&parahotplug_request_list_lock);

		parahotplug_request_kickoff(req);
	}
}
1746
12e364b9
KC
/* Process a controlvm message.
 * Return result:
 *    false - this function will return false only in the case where the
 *            controlvm message was NOT processed, but processing must be
 *            retried before reading the next controlvm message; a
 *            scenario where this can occur is when we need to throttle
 *            the allocation of memory in which to copy out controlvm
 *            payload data
 *    true  - processing of the controlvm message completed,
 *            either successfully or with an error.
 */
static bool
handle_command(struct controlvm_message inmsg, u64 channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr;
	u32 parm_bytes;
	struct parser_context *parser_ctx = NULL;
	bool local_addr;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return true;
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/* Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory. We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr && parm_bytes) {
		bool retry = false;

		parser_ctx =
		    parser_init_byte_stream(parm_addr, parm_bytes,
					    local_addr, &retry);
		/* allocation throttling: ask the caller to retry later */
		if (!parser_ctx && retry)
			return false;
	}

	/* real (non-test) messages are acknowledged before being handled */
	if (!local_addr) {
		controlvm_init_response(&ackmsg, &inmsg.hdr,
					CONTROLVM_RESP_SUCCESS);
		if (controlvm_channel)
			visorchannel_signalinsert(controlvm_channel,
						  CONTROLVM_QUEUE_ACK,
						  &ackmsg);
	}
	/* dispatch on the controlvm message id */
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		/* physical devices go through the parahotplug/udev path;
		 * virtual devices are handled in-driver
		 */
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/* save the hdr and cmd structures for later use */
			/* when sending back the response to Command */
			my_device_changestate(&inmsg);
			g_devicechangestate_packet = inmsg.cmd;
			break;
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now, just send a respond that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		break;
	default:
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr,
				-CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	/* release the payload parsing context, if one was created */
	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return true;
}
1854
/*
 * issue_vmcall_io_controlvm_addr() - ask the hypervisor where the CONTROLVM
 * channel lives.
 * @control_addr:  out - guest-physical address of the channel (written only
 *                 on success)
 * @control_bytes: out - channel size in bytes (written only on success)
 *
 * The params struct is allocated on the stack and its physical address is
 * handed to the hypervisor, which fills it in before the vmcall returns.
 *
 * Return: raw vmcall status; callers must test it with VMCALL_SUCCESSFUL().
 */
static inline unsigned int
issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
{
	struct vmcall_io_controlvm_addr_params params;
	int result = VMCALL_SUCCESS;
	u64 physaddr;

	physaddr = virt_to_phys(&params);
	ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
	if (VMCALL_SUCCESSFUL(result)) {
		*control_addr = params.address;
		*control_bytes = params.channel_bytes;
	}
	return result;
}
1870
d5b3f1dc 1871static u64 controlvm_get_channel_address(void)
524b0b63 1872{
5fc0229a 1873 u64 addr = 0;
b3c55b13 1874 u32 size = 0;
524b0b63 1875
0aca7844 1876 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
524b0b63 1877 return 0;
0aca7844 1878
524b0b63
BR
1879 return addr;
1880}
1881
12e364b9
KC
1882static void
1883controlvm_periodic_work(struct work_struct *work)
1884{
3ab47701 1885 struct controlvm_message inmsg;
f4c11551
JS
1886 bool got_command = false;
1887 bool handle_command_failed = false;
1c1ed292 1888 static u64 poll_count;
12e364b9
KC
1889
1890 /* make sure visorbus server is registered for controlvm callbacks */
4da3336c 1891 if (visorchipset_visorbusregwait && !visorbusregistered)
1c1ed292 1892 goto cleanup;
12e364b9 1893
1c1ed292
BR
1894 poll_count++;
1895 if (poll_count >= 250)
12e364b9
KC
1896 ; /* keep going */
1897 else
1c1ed292 1898 goto cleanup;
12e364b9
KC
1899
1900 /* Check events to determine if response to CHIPSET_READY
1901 * should be sent
1902 */
0639ba67
BR
1903 if (visorchipset_holdchipsetready &&
1904 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
12e364b9 1905 if (check_chipset_events() == 1) {
da021f02 1906 controlvm_respond(&g_chipset_msg_hdr, 0);
12e364b9 1907 clear_chipset_events();
da021f02 1908 memset(&g_chipset_msg_hdr, 0,
98d7b594 1909 sizeof(struct controlvm_message_header));
12e364b9
KC
1910 }
1911 }
1912
c3d9a224 1913 while (visorchannel_signalremove(controlvm_channel,
8a1182eb 1914 CONTROLVM_QUEUE_RESPONSE,
c3d9a224
BR
1915 &inmsg))
1916 ;
1c1ed292 1917 if (!got_command) {
7166ed19 1918 if (controlvm_pending_msg_valid) {
8a1182eb
BR
1919 /* we throttled processing of a prior
1920 * msg, so try to process it again
1921 * rather than reading a new one
1922 */
7166ed19 1923 inmsg = controlvm_pending_msg;
f4c11551 1924 controlvm_pending_msg_valid = false;
1c1ed292 1925 got_command = true;
75c1f8b7 1926 } else {
1c1ed292 1927 got_command = read_controlvm_event(&inmsg);
75c1f8b7 1928 }
8a1182eb 1929 }
12e364b9 1930
f4c11551 1931 handle_command_failed = false;
1c1ed292 1932 while (got_command && (!handle_command_failed)) {
b53e0e93 1933 most_recent_message_jiffies = jiffies;
8a1182eb
BR
1934 if (handle_command(inmsg,
1935 visorchannel_get_physaddr
c3d9a224 1936 (controlvm_channel)))
1c1ed292 1937 got_command = read_controlvm_event(&inmsg);
8a1182eb
BR
1938 else {
1939 /* this is a scenario where throttling
1940 * is required, but probably NOT an
1941 * error...; we stash the current
1942 * controlvm msg so we will attempt to
1943 * reprocess it on our next loop
1944 */
f4c11551 1945 handle_command_failed = true;
7166ed19 1946 controlvm_pending_msg = inmsg;
f4c11551 1947 controlvm_pending_msg_valid = true;
12e364b9
KC
1948 }
1949 }
1950
1951 /* parahotplug_worker */
1952 parahotplug_process_list();
1953
1c1ed292 1954cleanup:
12e364b9
KC
1955
1956 if (time_after(jiffies,
b53e0e93 1957 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
12e364b9
KC
1958 /* it's been longer than MIN_IDLE_SECONDS since we
1959 * processed our last controlvm message; slow down the
1960 * polling
1961 */
911e213e
BR
1962 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1963 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 1964 } else {
911e213e
BR
1965 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1966 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
12e364b9
KC
1967 }
1968
0bde2979 1969 schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
12e364b9
KC
1970}
1971
1972static void
1973setup_crash_devices_work_queue(struct work_struct *work)
1974{
e6bdb904
BR
1975 struct controlvm_message local_crash_bus_msg;
1976 struct controlvm_message local_crash_dev_msg;
3ab47701 1977 struct controlvm_message msg;
e6bdb904
BR
1978 u32 local_crash_msg_offset;
1979 u16 local_crash_msg_count;
12e364b9 1980
4da3336c
DK
1981 /* make sure visorbus is registered for controlvm callbacks */
1982 if (visorchipset_visorbusregwait && !visorbusregistered)
e6bdb904 1983 goto cleanup;
12e364b9
KC
1984
1985 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1986
1987 /* send init chipset msg */
98d7b594 1988 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2ea5117b
BR
1989 msg.cmd.init_chipset.bus_count = 23;
1990 msg.cmd.init_chipset.switch_count = 0;
12e364b9
KC
1991
1992 chipset_init(&msg);
1993
12e364b9 1994 /* get saved message count */
c3d9a224 1995 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1996 offsetof(struct spar_controlvm_channel_protocol,
1997 saved_crash_message_count),
e6bdb904 1998 &local_crash_msg_count, sizeof(u16)) < 0) {
12e364b9
KC
1999 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2000 POSTCODE_SEVERITY_ERR);
2001 return;
2002 }
2003
e6bdb904 2004 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
12e364b9 2005 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
e6bdb904 2006 local_crash_msg_count,
12e364b9
KC
2007 POSTCODE_SEVERITY_ERR);
2008 return;
2009 }
2010
2011 /* get saved crash message offset */
c3d9a224 2012 if (visorchannel_read(controlvm_channel,
d19642f6
BR
2013 offsetof(struct spar_controlvm_channel_protocol,
2014 saved_crash_message_offset),
e6bdb904 2015 &local_crash_msg_offset, sizeof(u32)) < 0) {
12e364b9
KC
2016 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2017 POSTCODE_SEVERITY_ERR);
2018 return;
2019 }
2020
2021 /* read create device message for storage bus offset */
c3d9a224 2022 if (visorchannel_read(controlvm_channel,
e6bdb904
BR
2023 local_crash_msg_offset,
2024 &local_crash_bus_msg,
3ab47701 2025 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
2026 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
2027 POSTCODE_SEVERITY_ERR);
2028 return;
2029 }
2030
2031 /* read create device message for storage device */
c3d9a224 2032 if (visorchannel_read(controlvm_channel,
e6bdb904 2033 local_crash_msg_offset +
3ab47701 2034 sizeof(struct controlvm_message),
e6bdb904 2035 &local_crash_dev_msg,
3ab47701 2036 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
2037 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
2038 POSTCODE_SEVERITY_ERR);
2039 return;
2040 }
2041
2042 /* reuse IOVM create bus message */
ebec8967 2043 if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
e6bdb904 2044 bus_create(&local_crash_bus_msg);
75c1f8b7 2045 } else {
12e364b9
KC
2046 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
2047 POSTCODE_SEVERITY_ERR);
2048 return;
2049 }
2050
2051 /* reuse create device message for storage device */
ebec8967 2052 if (local_crash_dev_msg.cmd.create_device.channel_addr) {
e6bdb904 2053 my_device_create(&local_crash_dev_msg);
75c1f8b7 2054 } else {
12e364b9
KC
2055 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
2056 POSTCODE_SEVERITY_ERR);
2057 return;
2058 }
12e364b9
KC
2059 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
2060 return;
2061
e6bdb904 2062cleanup:
12e364b9 2063
911e213e 2064 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 2065
0bde2979 2066 schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
12e364b9
KC
2067}
2068
2069static void
d32517e3 2070bus_create_response(struct visor_device *bus_info, int response)
12e364b9 2071{
4b4fd43a 2072 if (response >= 0)
0274b5ae 2073 bus_info->state.created = 1;
0274b5ae
DZ
2074
2075 bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
2076 response);
2077
2078 kfree(bus_info->pending_msg_hdr);
2079 bus_info->pending_msg_hdr = NULL;
12e364b9
KC
2080}
2081
2082static void
d32517e3 2083bus_destroy_response(struct visor_device *bus_info, int response)
12e364b9 2084{
0274b5ae
DZ
2085 bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
2086 response);
2087
2088 kfree(bus_info->pending_msg_hdr);
2089 bus_info->pending_msg_hdr = NULL;
12e364b9
KC
2090}
2091
2092static void
a298bc0b 2093device_create_response(struct visor_device *dev_info, int response)
12e364b9 2094{
0274b5ae
DZ
2095 if (response >= 0)
2096 dev_info->state.created = 1;
2097
2098 device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
2099 response);
2100
2101 kfree(dev_info->pending_msg_hdr);
addce19f 2102 dev_info->pending_msg_hdr = NULL;
12e364b9
KC
2103}
2104
2105static void
a298bc0b 2106device_destroy_response(struct visor_device *dev_info, int response)
12e364b9 2107{
0274b5ae
DZ
2108 device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
2109 response);
2110
2111 kfree(dev_info->pending_msg_hdr);
2112 dev_info->pending_msg_hdr = NULL;
12e364b9
KC
2113}
2114
d3368a58 2115static void
a298bc0b 2116visorchipset_device_pause_response(struct visor_device *dev_info,
b4b598fd 2117 int response)
12e364b9 2118{
12e364b9 2119 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
b4b598fd 2120 dev_info, response,
bd0d2dcc 2121 segment_state_standby);
0274b5ae
DZ
2122
2123 kfree(dev_info->pending_msg_hdr);
2124 dev_info->pending_msg_hdr = NULL;
12e364b9 2125}
12e364b9
KC
2126
2127static void
a298bc0b 2128device_resume_response(struct visor_device *dev_info, int response)
12e364b9
KC
2129{
2130 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
b4b598fd 2131 dev_info, response,
bd0d2dcc 2132 segment_state_running);
0274b5ae
DZ
2133
2134 kfree(dev_info->pending_msg_hdr);
2135 dev_info->pending_msg_hdr = NULL;
12e364b9
KC
2136}
2137
18b87ed1 2138static ssize_t chipsetready_store(struct device *dev,
8e76e695
BR
2139 struct device_attribute *attr,
2140 const char *buf, size_t count)
12e364b9 2141{
18b87ed1 2142 char msgtype[64];
12e364b9 2143
66e24b76
BR
2144 if (sscanf(buf, "%63s", msgtype) != 1)
2145 return -EINVAL;
2146
ebec8967 2147 if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
66e24b76
BR
2148 chipset_events[0] = 1;
2149 return count;
ebec8967 2150 } else if (!strcmp(msgtype, "MODULES_LOADED")) {
66e24b76
BR
2151 chipset_events[1] = 1;
2152 return count;
e22a4a0f
BR
2153 }
2154 return -EINVAL;
12e364b9
KC
2155}
2156
e56fa7cd
BR
2157/* The parahotplug/devicedisabled interface gets called by our support script
2158 * when an SR-IOV device has been shut down. The ID is passed to the script
2159 * and then passed back when the device has been removed.
2160 */
2161static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
2162 struct device_attribute *attr,
2163 const char *buf, size_t count)
e56fa7cd 2164{
94217363 2165 unsigned int id;
e56fa7cd 2166
ebec8967 2167 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
2168 return -EINVAL;
2169
2170 parahotplug_request_complete(id, 0);
2171 return count;
2172}
2173
2174/* The parahotplug/deviceenabled interface gets called by our support script
2175 * when an SR-IOV device has been recovered. The ID is passed to the script
2176 * and then passed back when the device has been brought back up.
2177 */
2178static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
2179 struct device_attribute *attr,
2180 const char *buf, size_t count)
e56fa7cd 2181{
94217363 2182 unsigned int id;
e56fa7cd 2183
ebec8967 2184 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
2185 return -EINVAL;
2186
2187 parahotplug_request_complete(id, 1);
2188 return count;
2189}
2190
/*
 * visorchipset_mmap() - map the GP control channel into userspace.
 *
 * Only offset VISORCHIPSET_MMAP_CONTROLCHANOFFSET (0) is supported: the
 * guest-physical address of the GP control channel is read out of the
 * CONTROLVM channel header and remapped into the caller's VMA.
 *
 * Return: 0 on success, -ENXIO for bad offsets or a missing channel,
 * -EAGAIN if the remap itself fails.
 */
static int
visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long physaddr = 0;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	u64 addr = 0;

	if (offset & (PAGE_SIZE - 1))
		return -ENXIO;	/* need aligned offsets */

	switch (offset) {
	case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
		vma->vm_flags |= VM_IO;
		if (!*file_controlvm_channel)
			return -ENXIO;

		/* fetch the physical address of the GP control channel
		 * from the CONTROLVM channel header; 0 means "none"
		 */
		visorchannel_read(*file_controlvm_channel,
			offsetof(struct spar_controlvm_channel_protocol,
				 gp_control_channel),
			&addr, sizeof(addr));
		if (!addr)
			return -ENXIO;

		physaddr = (unsigned long)addr;
		if (remap_pfn_range(vma, vma->vm_start,
				    physaddr >> PAGE_SHIFT,
				    vma->vm_end - vma->vm_start,
				    /*pgprot_noncached */
				    (vma->vm_page_prot))) {
			return -EAGAIN;
		}
		break;
	default:
		return -ENXIO;
	}
	return 0;
}
2229
/*
 * issue_vmcall_query_guest_virtual_time_offset() - query the guest's
 * virtual RTC time offset from the hypervisor.
 *
 * NOTE(review): the vmcall writes its outcome into @result, and that same
 * value is returned as an s64 -- presumably the offset is encoded in the
 * result register on success; confirm against the ISSUE_IO_VMCALL macro.
 */
static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
{
	u64 result = VMCALL_SUCCESS;
	u64 physaddr = 0;

	ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
			result);
	return result;
}
2239
/*
 * issue_vmcall_update_physical_time() - ask the hypervisor to adjust
 * physical time by @adjustment.
 *
 * Return: raw vmcall status (VMCALL_SUCCESS on success).
 */
static inline int issue_vmcall_update_physical_time(u64 adjustment)
{
	int result = VMCALL_SUCCESS;

	ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
	return result;
}
2247
e3420ed6
EA
2248static long visorchipset_ioctl(struct file *file, unsigned int cmd,
2249 unsigned long arg)
2250{
2500276e 2251 u64 adjustment;
e3420ed6
EA
2252 s64 vrtc_offset;
2253
2254 switch (cmd) {
2255 case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
2256 /* get the physical rtc offset */
2257 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
2258 if (copy_to_user((void __user *)arg, &vrtc_offset,
2259 sizeof(vrtc_offset))) {
2260 return -EFAULT;
2261 }
d5b3f1dc 2262 return 0;
e3420ed6
EA
2263 case VMCALL_UPDATE_PHYSICAL_TIME:
2264 if (copy_from_user(&adjustment, (void __user *)arg,
2265 sizeof(adjustment))) {
2266 return -EFAULT;
2267 }
2268 return issue_vmcall_update_physical_time(adjustment);
2269 default:
2270 return -EFAULT;
2271 }
2272}
2273
/* File operations for the visorchipset character device (see
 * visorchipset_file_init()).  read/write are intentionally NULL: the
 * device is driven only through ioctl and mmap.
 */
static const struct file_operations visorchipset_fops = {
	.owner = THIS_MODULE,
	.open = visorchipset_open,
	.read = NULL,
	.write = NULL,
	.unlocked_ioctl = visorchipset_ioctl,
	.release = visorchipset_release,
	.mmap = visorchipset_mmap,
};
2283
0f570fc0 2284static int
e3420ed6
EA
2285visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
2286{
2287 int rc = 0;
2288
2289 file_controlvm_channel = controlvm_channel;
2290 cdev_init(&file_cdev, &visorchipset_fops);
2291 file_cdev.owner = THIS_MODULE;
2292 if (MAJOR(major_dev) == 0) {
46168810 2293 rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
e3420ed6
EA
2294 /* dynamic major device number registration required */
2295 if (rc < 0)
2296 return rc;
2297 } else {
2298 /* static major device number registration required */
46168810 2299 rc = register_chrdev_region(major_dev, 1, "visorchipset");
e3420ed6
EA
2300 if (rc < 0)
2301 return rc;
2302 }
2303 rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
2304 if (rc < 0) {
2305 unregister_chrdev_region(major_dev, 1);
2306 return rc;
2307 }
2308 return 0;
2309}
2310
55c67dca
PB
2311static int
2312visorchipset_init(struct acpi_device *acpi_device)
12e364b9 2313{
33078257 2314 int rc = 0;
d5b3f1dc 2315 u64 addr;
d3368a58
JS
2316 uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
2317
2318 addr = controlvm_get_channel_address();
2319 if (!addr)
2320 return -ENODEV;
12e364b9 2321
4da3336c 2322 memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
84982fbf 2323 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
12e364b9 2324
c732623b 2325 controlvm_channel = visorchannel_create_with_lock(addr, 0,
d3368a58 2326 GFP_KERNEL, uuid);
c732623b
TS
2327 if (!controlvm_channel)
2328 return -ENODEV;
d3368a58
JS
2329 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2330 visorchannel_get_header(controlvm_channel))) {
2331 initialize_controlvm_payload();
8a1182eb 2332 } else {
d3368a58
JS
2333 visorchannel_destroy(controlvm_channel);
2334 controlvm_channel = NULL;
8a1182eb
BR
2335 return -ENODEV;
2336 }
2337
5aa8ae57
BR
2338 major_dev = MKDEV(visorchipset_major, 0);
2339 rc = visorchipset_file_init(major_dev, &controlvm_channel);
4cb005a9 2340 if (rc < 0) {
4cb005a9 2341 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
a6a3989b 2342 goto cleanup;
4cb005a9 2343 }
9f8d0e8b 2344
da021f02 2345 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2346
4da3336c
DK
2347 /* if booting in a crash kernel */
2348 if (is_kdump_kernel())
2349 INIT_DELAYED_WORK(&periodic_controlvm_work,
2350 setup_crash_devices_work_queue);
2351 else
2352 INIT_DELAYED_WORK(&periodic_controlvm_work,
2353 controlvm_periodic_work);
4da3336c 2354
4da3336c
DK
2355 most_recent_message_jiffies = jiffies;
2356 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
0bde2979 2357 schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
12e364b9 2358
eb34e877
BR
2359 visorchipset_platform_device.dev.devt = major_dev;
2360 if (platform_device_register(&visorchipset_platform_device) < 0) {
4cb005a9 2361 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
fbd91616 2362 rc = -ENODEV;
a6a3989b 2363 goto cleanup;
4cb005a9 2364 }
12e364b9 2365 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
c79b28f7
PB
2366
2367 rc = visorbus_init();
a6a3989b 2368cleanup:
12e364b9 2369 if (rc) {
12e364b9
KC
2370 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2371 POSTCODE_SEVERITY_ERR);
2372 }
2373 return rc;
2374}
2375
/*
 * visorchipset_file_cleanup() - tear down the visorchipset char device.
 * @major_dev: dev_t whose chrdev region should be released
 */
static void
visorchipset_file_cleanup(dev_t major_dev)
{
	/* only delete the cdev if it was actually initialized */
	if (file_cdev.ops)
		cdev_del(&file_cdev);
	file_cdev.ops = NULL;
	unregister_chrdev_region(major_dev, 1);
}
2384
/*
 * visorchipset_exit() - ACPI .remove callback; tear down everything set up
 * by visorchipset_init(), in reverse dependency order.
 *
 * Return: always 0.
 */
static int
visorchipset_exit(struct acpi_device *acpi_device)
{
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	visorbus_exit();

	/* stop the controlvm polling work before freeing its resources */
	cancel_delayed_work_sync(&periodic_controlvm_work);
	destroy_controlvm_payload_info(&controlvm_payload_info);

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	visorchannel_destroy(controlvm_channel);

	visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
	platform_device_unregister(&visorchipset_platform_device);
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	return 0;
}
2405
/* ACPI IDs this driver binds to.  NOTE(review): PNP0A07 is presumably the
 * ID under which s-Par firmware advertises the chipset device -- confirm.
 * The empty entry terminates the table.
 */
static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};
55c67dca
PB
2410
/* ACPI driver glue: visorchipset_init()/visorchipset_exit() run when a
 * matching device from unisys_device_ids is added/removed.
 */
static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};
1fc07f99
DK
2421
2422MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
2423
d5b3f1dc
EA
2424static __init uint32_t visorutil_spar_detect(void)
2425{
2426 unsigned int eax, ebx, ecx, edx;
2427
0c9f3536 2428 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
d5b3f1dc
EA
2429 /* check the ID */
2430 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
2431 return (ebx == UNISYS_SPAR_ID_EBX) &&
2432 (ecx == UNISYS_SPAR_ID_ECX) &&
2433 (edx == UNISYS_SPAR_ID_EDX);
2434 } else {
2435 return 0;
2436 }
2437}
55c67dca
PB
2438
2439static int init_unisys(void)
2440{
2441 int result;
35e606de 2442
d5b3f1dc 2443 if (!visorutil_spar_detect())
55c67dca
PB
2444 return -ENODEV;
2445
2446 result = acpi_bus_register_driver(&unisys_acpi_driver);
2447 if (result)
2448 return -ENODEV;
2449
2450 pr_info("Unisys Visorchipset Driver Loaded.\n");
2451 return 0;
2452};
2453
/* Module exit: unregister the ACPI driver registered by init_unisys(). */
static void exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}
2458
12e364b9 2459module_param_named(major, visorchipset_major, int, S_IRUGO);
b615d628
JS
2460MODULE_PARM_DESC(visorchipset_major,
2461 "major device number to use for the device node");
4da3336c
DK
2462module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
2463MODULE_PARM_DESC(visorchipset_visorbusreqwait,
12e364b9 2464 "1 to have the module wait for the visor bus to register");
12e364b9
KC
2465module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2466 int, S_IRUGO);
2467MODULE_PARM_DESC(visorchipset_holdchipsetready,
2468 "1 to hold response to CHIPSET_READY");
b615d628 2469
55c67dca
PB
2470module_init(init_unisys);
2471module_exit(exit_unisys);
12e364b9
KC
2472
2473MODULE_AUTHOR("Unisys");
2474MODULE_LICENSE("GPL");
2475MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2476 VERSION);
2477MODULE_VERSION(VERSION);