3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
19 #include "visorchipset.h"
20 #include "procobjecttree.h"
21 #include "visorchannel.h"
22 #include "periodic_work.h"
26 #include "controlvmcompletionstatus.h"
27 #include "guestlinuxdebug.h"
29 #include <linux/nls.h>
30 #include <linux/netdevice.h>
31 #include <linux/platform_device.h>
32 #include <linux/uuid.h>
/*
 * Module-wide constants and mutable state for the visorchipset driver.
 *
 * NOTE(review): the embedded numbering in this chunk skips values, so
 * several declarations and comment blocks below are visibly truncated
 * (e.g. unterminated comments); confirm against the full source before
 * relying on any single fragment.
 */
34 #define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
35 #define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
36 * vnic loopback test */
37 #define TEST_VNIC_SWITCHNO 1
38 #define TEST_VNIC_BUSNO 9
/* Sizing limits and controlvm-channel polling intervals (in jiffies). */
40 #define MAX_NAME_SIZE 128
41 #define MAX_IP_SIZE 50
42 #define MAXOUTSTANDINGCHANNELCOMMAND 256
43 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
44 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
46 /* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
47 * we switch to slow polling mode. As soon as we get a controlvm
48 * message, we switch back to fast polling mode.
50 #define MIN_IDLE_SECONDS 10
51 static ulong poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
52 static ulong most_recent_message_jiffies; /* when we got our last
53 * controlvm message */
/* Flags recording whether server/client busdev notifiers are registered. */
62 static int serverregistered;
63 static int clientregistered;
65 #define MAX_CHIPSET_EVENTS 2
66 static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
/* Periodic work item/queue — presumably drives controlvm channel polling
 * (see controlvm_periodic_work() referenced in comments below) — TODO confirm. */
68 static struct delayed_work periodic_controlvm_work;
69 static struct workqueue_struct *periodic_controlvm_workqueue;
70 static DEFINE_SEMAPHORE(notifier_lock);
/* Saved controlvm message headers, kept so responses can be sent later. */
72 static struct controlvm_message_header g_diag_msg_hdr;
73 static struct controlvm_message_header g_chipset_msg_hdr;
74 static struct controlvm_message_header g_del_dump_msg_hdr;
75 static const uuid_le spar_diag_pool_channel_protocol_uuid =
76 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
77 /* 0xffffff is an invalid Bus/Device number */
78 static ulong g_diagpool_bus_no = 0xffffff;
79 static ulong g_diagpool_dev_no = 0xffffff;
80 static struct controlvm_message_packet g_devicechangestate_packet;
82 /* Only VNIC and VHBA channels are sent to visorclientbus (aka
85 #define FOR_VISORHACKBUS(channel_type_guid) \
86 (((uuid_le_cmp(channel_type_guid,\
87 spar_vnic_channel_protocol_uuid) == 0) ||\
88 (uuid_le_cmp(channel_type_guid,\
89 spar_vhba_channel_protocol_uuid) == 0)))
90 #define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
92 #define is_diagpool_channel(channel_type_guid) \
93 (uuid_le_cmp(channel_type_guid,\
94 spar_diag_pool_channel_protocol_uuid) == 0)
/* Master lists of known buses/devices, and the controlvm channel handle. */
96 static LIST_HEAD(bus_info_list);
97 static LIST_HEAD(dev_info_list);
99 static struct visorchannel *controlvm_channel;
/*
 * Bookkeeping structures for controlvm payloads, live-dump conversations,
 * TRANSMIT_FILE (putfile) transfers, and parahotplug requests.
 * NOTE(review): struct bodies below are missing lines (closing braces and
 * some members elided in this chunk).
 */
101 struct controlvm_payload_info {
102 u8 __iomem *ptr; /* pointer to base address of payload pool */
103 u64 offset; /* offset from beginning of controlvm
104 * channel to beginning of payload * pool */
105 u32 bytes; /* number of bytes in payload pool */
108 /* Manages the request payload in the controlvm channel */
109 static struct controlvm_payload_info ControlVm_payload_info;
111 struct livedump_info {
112 struct controlvm_message_header Dumpcapture_header;
113 struct controlvm_message_header Gettextdump_header;
114 struct controlvm_message_header Dumpcomplete_header;
115 BOOL Gettextdump_outstanding;
118 atomic_t buffers_in_use;
121 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
122 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
124 static struct livedump_info LiveDump_info;
126 /* The following globals are used to handle the scenario where we are unable to
127 * offload the payload from a controlvm message due to memory requirements. In
128 * this scenario, we simply stash the controlvm message, then attempt to
129 * process it again the next time controlvm_periodic_work() runs.
131 static struct controlvm_message ControlVm_Pending_Msg;
132 static BOOL ControlVm_Pending_Msg_Valid = FALSE;
134 /* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
135 * TRANSMIT_FILE PutFile payloads.
137 static struct kmem_cache *Putfile_buffer_list_pool;
138 static const char Putfile_buffer_list_pool_name[] =
139 "controlvm_putfile_buffer_list_pool";
141 /* This identifies a data buffer that has been received via a controlvm messages
142 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
144 struct putfile_buffer_entry {
145 struct list_head next; /* putfile_buffer_entry list */
146 struct parser_context *parser_ctx; /* points to input data buffer */
149 /* List of struct putfile_request *, via next_putfile_request member.
150 * Each entry in this list identifies an outstanding TRANSMIT_FILE
153 static LIST_HEAD(Putfile_request_list);
155 /* This describes a buffer and its current state of transfer (e.g., how many
156 * bytes have already been supplied as putfile data, and how many bytes are
157 * remaining) for a putfile_request.
159 struct putfile_active_buffer {
160 /* a payload from a controlvm message, containing a file data buffer */
161 struct parser_context *parser_ctx;
162 /* points within data area of parser_ctx to next byte of data */
164 /* # bytes left from <pnext> to the end of this data buffer */
165 size_t bytes_remaining;
/* Magic value stored in putfile_request.sig to validate the struct. */
168 #define PUTFILE_REQUEST_SIG 0x0906101302281211
169 /* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
170 * conversation. Structs of this type are dynamically linked into
171 * <Putfile_request_list>.
173 struct putfile_request {
174 u64 sig; /* PUTFILE_REQUEST_SIG */
176 /* header from original TransmitFile request */
177 struct controlvm_message_header controlvm_header;
178 u64 file_request_number; /* from original TransmitFile request */
180 /* link to next struct putfile_request */
181 struct list_head next_putfile_request;
183 /* most-recent sequence number supplied via a controlvm message */
184 u64 data_sequence_number;
186 /* head of putfile_buffer_entry list, which describes the data to be
187 * supplied as putfile data;
188 * - this list is added to when controlvm messages come in that supply
190 * - this list is removed from via the hotplug program that is actually
191 * consuming these buffers to write as file data */
192 struct list_head input_buffer_list;
193 spinlock_t req_list_lock; /* lock for input_buffer_list */
195 /* waiters for input_buffer_list to go non-empty */
196 wait_queue_head_t input_buffer_wq;
198 /* data not yet read within current putfile_buffer_entry */
199 struct putfile_active_buffer active_buf;
201 /* <0 = failed, 0 = in-progress, >0 = successful; */
202 /* note that this must be set with req_list_lock, and if you set <0, */
203 /* it is your responsibility to also free up all of the other objects */
204 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
205 /* before releasing the lock */
206 int completion_status;
209 static atomic_t Visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
/* A queued parahotplug (device enable/disable) request with an expiry time. */
211 struct parahotplug_request {
212 struct list_head list;
214 unsigned long expiration;
215 struct controlvm_message msg;
218 static LIST_HEAD(Parahotplug_request_list);
219 static DEFINE_SPINLOCK(Parahotplug_request_list_lock); /* lock for above */
220 static void parahotplug_process_list(void);
222 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
223 * CONTROLVM_REPORTEVENT.
225 static struct visorchipset_busdev_notifiers BusDev_Server_Notifiers;
226 static struct visorchipset_busdev_notifiers BusDev_Client_Notifiers;
/* Forward declarations for the response callbacks wired up below. */
228 static void bus_create_response(ulong busNo, int response);
229 static void bus_destroy_response(ulong busNo, int response);
230 static void device_create_response(ulong busNo, ulong devNo, int response);
231 static void device_destroy_response(ulong busNo, ulong devNo, int response);
232 static void device_resume_response(ulong busNo, ulong devNo, int response);
/* Responder table handed out to registered busdev servers/clients. */
234 static struct visorchipset_busdev_responders BusDev_Responders = {
235 .bus_create = bus_create_response,
236 .bus_destroy = bus_destroy_response,
237 .device_create = device_create_response,
238 .device_destroy = device_destroy_response,
239 .device_pause = visorchipset_device_pause_response,
240 .device_resume = device_resume_response,
243 /* info for /dev/visorchipset */
244 static dev_t MajorDev = -1; /**< indicates major num for device */
/*
 * sysfs plumbing: attribute show/store prototypes, attribute groups, and
 * the platform device (/sys/devices/platform/visorchipset) that exposes
 * them. Some prototype continuation lines are elided in this chunk.
 */
246 /* prototypes for attributes */
247 static ssize_t toolaction_show(struct device *dev,
248 struct device_attribute *attr, char *buf);
249 static ssize_t toolaction_store(struct device *dev,
250 struct device_attribute *attr, const char *buf, size_t count);
251 static DEVICE_ATTR_RW(toolaction);
253 static ssize_t boottotool_show(struct device *dev,
254 struct device_attribute *attr, char *buf);
255 static ssize_t boottotool_store(struct device *dev,
256 struct device_attribute *attr, const char *buf, size_t count);
257 static DEVICE_ATTR_RW(boottotool);
259 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
261 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
262 const char *buf, size_t count);
263 static DEVICE_ATTR_RW(error);
265 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
267 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
268 const char *buf, size_t count);
269 static DEVICE_ATTR_RW(textid);
271 static ssize_t remaining_steps_show(struct device *dev,
272 struct device_attribute *attr, char *buf);
273 static ssize_t remaining_steps_store(struct device *dev,
274 struct device_attribute *attr, const char *buf, size_t count);
275 static DEVICE_ATTR_RW(remaining_steps);
/* Write-only attributes (no corresponding _show handlers). */
277 static ssize_t chipsetready_store(struct device *dev,
278 struct device_attribute *attr, const char *buf, size_t count);
279 static DEVICE_ATTR_WO(chipsetready);
281 static ssize_t devicedisabled_store(struct device *dev,
282 struct device_attribute *attr, const char *buf, size_t count);
283 static DEVICE_ATTR_WO(devicedisabled);
285 static ssize_t deviceenabled_store(struct device *dev,
286 struct device_attribute *attr, const char *buf, size_t count);
287 static DEVICE_ATTR_WO(deviceenabled);
/* Attribute groups: install-status, guest, and parahotplug controls. */
289 static struct attribute *visorchipset_install_attrs[] = {
290 &dev_attr_toolaction.attr,
291 &dev_attr_boottotool.attr,
292 &dev_attr_error.attr,
293 &dev_attr_textid.attr,
294 &dev_attr_remaining_steps.attr,
298 static struct attribute_group visorchipset_install_group = {
300 .attrs = visorchipset_install_attrs
303 static struct attribute *visorchipset_guest_attrs[] = {
304 &dev_attr_chipsetready.attr,
308 static struct attribute_group visorchipset_guest_group = {
310 .attrs = visorchipset_guest_attrs
313 static struct attribute *visorchipset_parahotplug_attrs[] = {
314 &dev_attr_devicedisabled.attr,
315 &dev_attr_deviceenabled.attr,
319 static struct attribute_group visorchipset_parahotplug_group = {
320 .name = "parahotplug",
321 .attrs = visorchipset_parahotplug_attrs
324 static const struct attribute_group *visorchipset_dev_groups[] = {
325 &visorchipset_install_group,
326 &visorchipset_guest_group,
327 &visorchipset_parahotplug_group,
331 /* /sys/devices/platform/visorchipset */
332 static struct platform_device Visorchipset_platform_device = {
333 .name = "visorchipset",
335 .dev.groups = visorchipset_dev_groups,
338 /* Function prototypes */
339 static void controlvm_respond(struct controlvm_message_header *msgHdr,
341 static void controlvm_respond_chipset_init(
342 struct controlvm_message_header *msgHdr, int response,
343 enum ultra_chipset_feature features);
344 static void controlvm_respond_physdev_changestate(
345 struct controlvm_message_header *msgHdr, int response,
346 struct spar_segment_state state);
/* Sysfs show: read the tool_action byte from the controlvm channel and
 * format it as a decimal value. */
348 static ssize_t toolaction_show(struct device *dev,
349 struct device_attribute *attr,
354 visorchannel_read(controlvm_channel,
355 offsetof(struct spar_controlvm_channel_protocol,
356 tool_action), &toolAction, sizeof(u8));
357 return scnprintf(buf, PAGE_SIZE, "%u\n", toolAction);
/* Sysfs store: parse a u8 from userspace and write it to the channel's
 * tool_action field. (Error-return lines elided in this chunk.) */
360 static ssize_t toolaction_store(struct device *dev,
361 struct device_attribute *attr,
362 const char *buf, size_t count)
367 if (kstrtou8(buf, 10, &toolAction) != 0)
370 ret = visorchannel_write(controlvm_channel,
371 offsetof(struct spar_controlvm_channel_protocol, tool_action),
372 &toolAction, sizeof(u8));
/* Sysfs show: read efi_spar_ind from the channel and report its
 * boot_to_tool flag. */
379 static ssize_t boottotool_show(struct device *dev,
380 struct device_attribute *attr,
383 struct efi_spar_indication efiSparIndication;
385 visorchannel_read(controlvm_channel,
386 offsetof(struct spar_controlvm_channel_protocol,
387 efi_spar_ind), &efiSparIndication,
388 sizeof(struct efi_spar_indication));
389 return scnprintf(buf, PAGE_SIZE, "%u\n",
390 efiSparIndication.boot_to_tool);
/* Sysfs store: parse an int and write it as the boot_to_tool flag.
 * NOTE(review): efiSparIndication's other fields are not visibly
 * initialized here — likely done on elided lines; confirm. */
393 static ssize_t boottotool_store(struct device *dev,
394 struct device_attribute *attr,
395 const char *buf, size_t count)
398 struct efi_spar_indication efiSparIndication;
400 if (kstrtoint(buf, 10, &val) != 0)
403 efiSparIndication.boot_to_tool = val;
404 ret = visorchannel_write(controlvm_channel,
405 offsetof(struct spar_controlvm_channel_protocol,
407 &(efiSparIndication),
408 sizeof(struct efi_spar_indication));
/* Sysfs show/store pair for the channel's installation_error field. */
415 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
420 visorchannel_read(controlvm_channel, offsetof(
421 struct spar_controlvm_channel_protocol, installation_error),
422 &error, sizeof(u32));
423 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
426 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
427 const char *buf, size_t count)
432 if (kstrtou32(buf, 10, &error) != 0)
435 ret = visorchannel_write(controlvm_channel,
436 offsetof(struct spar_controlvm_channel_protocol,
438 &error, sizeof(u32));
/* Sysfs show/store pair for the channel's installation_text_id field. */
444 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
449 visorchannel_read(controlvm_channel, offsetof(
450 struct spar_controlvm_channel_protocol, installation_text_id),
451 &textId, sizeof(u32));
452 return scnprintf(buf, PAGE_SIZE, "%i\n", textId);
455 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
456 const char *buf, size_t count)
461 if (kstrtou32(buf, 10, &textId) != 0)
464 ret = visorchannel_write(controlvm_channel,
465 offsetof(struct spar_controlvm_channel_protocol,
466 installation_text_id),
467 &textId, sizeof(u32));
/* Sysfs show/store pair for installation_remaining_steps (u16). */
474 static ssize_t remaining_steps_show(struct device *dev,
475 struct device_attribute *attr, char *buf)
479 visorchannel_read(controlvm_channel,
480 offsetof(struct spar_controlvm_channel_protocol,
481 installation_remaining_steps),
484 return scnprintf(buf, PAGE_SIZE, "%hu\n", remainingSteps);
487 static ssize_t remaining_steps_store(struct device *dev,
488 struct device_attribute *attr, const char *buf, size_t count)
493 if (kstrtou16(buf, 10, &remainingSteps) != 0)
496 ret = visorchannel_write(controlvm_channel,
497 offsetof(struct spar_controlvm_channel_protocol,
498 installation_remaining_steps),
499 &remainingSteps, sizeof(u16));
/*
 * Body of a unicode round-trip self-test (the enclosing function's
 * signature is elided in this chunk — presumably a testUnicode()-style
 * debug helper; confirm against the full source). It converts a wide
 * string to UTF-8 and back, then compares the result with the original,
 * manually re-adding the NUL terminator the NLS helpers omit.
 */
509 wchar_t unicodeString[] = { 'a', 'b', 'c', 0 };
510 char s[sizeof(unicodeString) * NLS_MAX_CHARSET_SIZE];
511 wchar_t unicode2[99];
513 /* NOTE: Either due to a bug, or feature I don't understand, the
514 * kernel utf8_mbstowcs() and utf_wcstombs() do NOT copy the
515 * trailed NUL byte!! REALLY!!!!! Arrrrgggghhhhh
518 LOGINF("sizeof(wchar_t) = %d", sizeof(wchar_t));
519 LOGINF("utf8_wcstombs=%d",
520 chrs = utf8_wcstombs(s, unicodeString, sizeof(s)));
522 s[chrs] = '\0'; /* GRRRRRRRR */
524 LOGINF("utf8_mbstowcs=%d", chrs = utf8_mbstowcs(unicode2, s, 100));
526 unicode2[chrs] = 0; /* GRRRRRRRR */
527 if (memcmp(unicodeString, unicode2, sizeof(unicodeString)) == 0)
528 LOGINF("strings match... good");
530 LOGINF("strings did not match!!");
/* Destructor callback for a visorchipset_bus_info: tears down its proc
 * object, frees owned strings, and zeroes the struct. (Return type and
 * some free()s elided in this chunk.) */
535 busInfo_clear(void *v)
537 struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);
539 if (p->proc_object) {
540 visor_proc_DestroyObject(p->proc_object);
541 p->proc_object = NULL;
546 kfree(p->description);
547 p->description = NULL;
549 p->state.created = 0;
550 memset(p, 0, sizeof(struct visorchipset_bus_info));
/* Destructor callback for a visorchipset_device_info: clears the created
 * flag and zeroes the struct. */
554 devInfo_clear(void *v)
556 struct visorchipset_device_info *p =
557 (struct visorchipset_device_info *)(v);
559 p->state.created = 0;
560 memset(p, 0, sizeof(struct visorchipset_device_info));
/* AND together all chipset_events entries — presumably a response is sent
 * only once every event has fired (surrounding logic elided; confirm). */
564 check_chipset_events(void)
568 /* Check events to determine if response should be sent */
569 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
570 send_msg &= chipset_events[i];
/* Reset every chipset_events entry to 0. */
575 clear_chipset_events(void)
578 /* Clear chipset_events */
579 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
580 chipset_events[i] = 0;
/*
 * Register (notifiers != NULL) or unregister (notifiers == NULL) the
 * server-side busdev notifier callbacks, handing back the shared
 * BusDev_Responders table. Serialized under notifier_lock.
 * NOTE(review): "¬ifier_lock" below looks like mojibake for
 * "&notifier_lock" — confirm against the pristine source.
 */
584 visorchipset_register_busdev_server(
585 struct visorchipset_busdev_notifiers *notifiers,
586 struct visorchipset_busdev_responders *responders,
587 struct ultra_vbus_deviceinfo *driver_info)
589 down(¬ifier_lock);
590 if (notifiers == NULL) {
591 memset(&BusDev_Server_Notifiers, 0,
592 sizeof(BusDev_Server_Notifiers));
593 serverregistered = 0; /* clear flag */
595 BusDev_Server_Notifiers = *notifiers;
596 serverregistered = 1; /* set flag */
599 *responders = BusDev_Responders;
601 bus_device_info_init(driver_info, "chipset", "visorchipset",
606 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
/* Client-side counterpart of the above: same register/unregister contract
 * but for BusDev_Client_Notifiers / clientregistered. */
609 visorchipset_register_busdev_client(
610 struct visorchipset_busdev_notifiers *notifiers,
611 struct visorchipset_busdev_responders *responders,
612 struct ultra_vbus_deviceinfo *driver_info)
614 down(¬ifier_lock);
615 if (notifiers == NULL) {
616 memset(&BusDev_Client_Notifiers, 0,
617 sizeof(BusDev_Client_Notifiers));
618 clientregistered = 0; /* clear flag */
620 BusDev_Client_Notifiers = *notifiers;
621 clientregistered = 1; /* set flag */
624 *responders = BusDev_Responders;
626 bus_device_info_init(driver_info, "chipset(bolts)",
627 "visorchipset", VERSION, NULL);
630 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
/* Unlink every entry from the bus and device lists. The per-entry free
 * calls appear to be on elided lines — confirm no leak in full source. */
633 cleanup_controlvm_structures(void)
635 struct visorchipset_bus_info *bi, *tmp_bi;
636 struct visorchipset_device_info *di, *tmp_di;
638 list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
640 list_del(&bi->entry);
644 list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
646 list_del(&di->entry);
/*
 * Handle CONTROLVM_CHIPSET_INIT: runs once (guarded by chipset_inited),
 * negotiates feature bits with Command, and responds if a response was
 * requested. Second invocation yields -CONTROLVM_RESP_ERROR_ALREADY_DONE.
 */
652 chipset_init(struct controlvm_message *inmsg)
654 static int chipset_inited;
655 enum ultra_chipset_feature features = 0;
656 int rc = CONTROLVM_RESP_SUCCESS;
658 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
659 if (chipset_inited) {
660 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
664 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
666 /* Set features to indicate we support parahotplug (if Command
667 * also supports it). */
669 inmsg->cmd.init_chipset.
670 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
672 /* Set the "reply" bit so Command knows this is a
673 * features-aware driver. */
674 features |= ULTRA_CHIPSET_FEATURE_REPLY;
678 cleanup_controlvm_structures();
679 if (inmsg->hdr.flags.response_expected)
680 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
/*
 * Build a response message: copy the request header, zero the payload
 * fields, and (on elided condition — presumably response < 0) mark the
 * header as failed with the negated response as completion status.
 */
684 controlvm_init_response(struct controlvm_message *msg,
685 struct controlvm_message_header *msgHdr, int response)
687 memset(msg, 0, sizeof(struct controlvm_message));
688 memcpy(&msg->hdr, msgHdr, sizeof(struct controlvm_message_header));
689 msg->hdr.payload_bytes = 0;
690 msg->hdr.payload_vm_offset = 0;
691 msg->hdr.payload_max_bytes = 0;
693 msg->hdr.flags.failed = 1;
694 msg->hdr.completion_status = (u32) (-response);
/*
 * Send a generic response on the controlvm request queue. For a DiagPool
 * DEVICE_CHANGESTATE the saved g_devicechangestate_packet is echoed back
 * in the outgoing command. (Bus/dev comparison operands elided here.)
 */
699 controlvm_respond(struct controlvm_message_header *msgHdr, int response)
701 struct controlvm_message outmsg;
703 controlvm_init_response(&outmsg, msgHdr, response);
704 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
705 * back the deviceChangeState structure in the packet. */
706 if (msgHdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
707 g_devicechangestate_packet.device_change_state.bus_no ==
709 g_devicechangestate_packet.device_change_state.dev_no ==
711 outmsg.cmd = g_devicechangestate_packet;
712 if (outmsg.hdr.flags.test_message == 1)
715 if (!visorchannel_signalinsert(controlvm_channel,
716 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
/* Respond to CHIPSET_INIT, echoing the negotiated feature bits. */
722 controlvm_respond_chipset_init(struct controlvm_message_header *msgHdr,
724 enum ultra_chipset_feature features)
726 struct controlvm_message outmsg;
728 controlvm_init_response(&outmsg, msgHdr, response);
729 outmsg.cmd.init_chipset.features = features;
730 if (!visorchannel_signalinsert(controlvm_channel,
731 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
/* Respond to a physical-device CHANGESTATE with the resulting segment
 * state; phys_device flag marks this as a physical (parahotplug) device. */
736 static void controlvm_respond_physdev_changestate(
737 struct controlvm_message_header *msgHdr, int response,
738 struct spar_segment_state state)
740 struct controlvm_message outmsg;
742 controlvm_init_response(&outmsg, msgHdr, response);
743 outmsg.cmd.device_change_state.state = state;
744 outmsg.cmd.device_change_state.flags.phys_device = 1;
745 if (!visorchannel_signalinsert(controlvm_channel,
746 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
/*
 * Persist a crash-time controlvm message into the channel's saved-crash
 * area so it can be replayed after a kdump reboot. Reads the saved-message
 * count and offset from the channel, validates the count against
 * CONTROLVM_CRASHMSG_MAX, then writes the message at slot 0 (CRASH_BUS)
 * or slot 1 (otherwise — presumably CRASH_DEV; the else line is elided).
 * Failures are reported via POSTCODE only; early-return lines are elided.
 */
752 visorchipset_save_message(struct controlvm_message *msg,
753 enum crash_obj_type type)
755 u32 localSavedCrashMsgOffset;
756 u16 localSavedCrashMsgCount;
758 /* get saved message count */
759 if (visorchannel_read(controlvm_channel,
760 offsetof(struct spar_controlvm_channel_protocol,
761 saved_crash_message_count),
762 &localSavedCrashMsgCount, sizeof(u16)) < 0) {
763 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
764 POSTCODE_SEVERITY_ERR);
768 if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
769 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
770 localSavedCrashMsgCount,
771 POSTCODE_SEVERITY_ERR);
775 /* get saved crash message offset */
776 if (visorchannel_read(controlvm_channel,
777 offsetof(struct spar_controlvm_channel_protocol,
778 saved_crash_message_offset),
779 &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
780 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
781 POSTCODE_SEVERITY_ERR);
785 if (type == CRASH_BUS) {
786 if (visorchannel_write(controlvm_channel,
787 localSavedCrashMsgOffset,
789 sizeof(struct controlvm_message)) < 0) {
790 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
791 POSTCODE_SEVERITY_ERR);
795 if (visorchannel_write(controlvm_channel,
796 localSavedCrashMsgOffset +
797 sizeof(struct controlvm_message), msg,
798 sizeof(struct controlvm_message)) < 0) {
799 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
800 POSTCODE_SEVERITY_ERR);
805 EXPORT_SYMBOL_GPL(visorchipset_save_message);
/*
 * Complete a bus command: update the bus's created state, send the
 * deferred controlvm response recorded in pending_msg_hdr (if any), and
 * clean up devices when a create failed or the bus was destroyed.
 * NOTE(review): several guard/else lines are elided in this chunk.
 */
808 bus_responder(enum controlvm_id cmdId, ulong busNo, int response)
810 struct visorchipset_bus_info *p = NULL;
811 BOOL need_clear = FALSE;
813 p = findbus(&bus_info_list, busNo);
818 if ((cmdId == CONTROLVM_BUS_CREATE) &&
819 (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
820 /* undo the row we just created... */
821 delbusdevices(&dev_info_list, busNo);
823 if (cmdId == CONTROLVM_BUS_CREATE)
824 p->state.created = 1;
825 if (cmdId == CONTROLVM_BUS_DESTROY)
829 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
830 return; /* no controlvm response needed */
831 if (p->pending_msg_hdr.id != (u32) cmdId)
833 controlvm_respond(&p->pending_msg_hdr, response);
834 p->pending_msg_hdr.id = CONTROLVM_INVALID;
837 delbusdevices(&dev_info_list, busNo);
/*
 * Complete a device CHANGESTATE: respond with an explicit
 * device_change_state packet carrying bus/dev/state, rather than the
 * generic controlvm_respond() path.
 */
842 device_changestate_responder(enum controlvm_id cmdId,
843 ulong busNo, ulong devNo, int response,
844 struct spar_segment_state responseState)
846 struct visorchipset_device_info *p = NULL;
847 struct controlvm_message outmsg;
849 p = finddevice(&dev_info_list, busNo, devNo);
852 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
853 return; /* no controlvm response needed */
854 if (p->pending_msg_hdr.id != cmdId)
857 controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
859 outmsg.cmd.device_change_state.bus_no = busNo;
860 outmsg.cmd.device_change_state.dev_no = devNo;
861 outmsg.cmd.device_change_state.state = responseState;
863 if (!visorchannel_signalinsert(controlvm_channel,
864 CONTROLVM_QUEUE_REQUEST, &outmsg))
867 p->pending_msg_hdr.id = CONTROLVM_INVALID;
/* Device analogue of bus_responder(): update created state and send any
 * deferred response held in pending_msg_hdr. */
871 device_responder(enum controlvm_id cmdId, ulong busNo, ulong devNo,
874 struct visorchipset_device_info *p = NULL;
875 BOOL need_clear = FALSE;
877 p = finddevice(&dev_info_list, busNo, devNo);
881 if (cmdId == CONTROLVM_DEVICE_CREATE)
882 p->state.created = 1;
883 if (cmdId == CONTROLVM_DEVICE_DESTROY)
887 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
888 return; /* no controlvm response needed */
890 if (p->pending_msg_hdr.id != (u32) cmdId)
893 controlvm_respond(&p->pending_msg_hdr, response);
894 p->pending_msg_hdr.id = CONTROLVM_INVALID;
/*
 * Common tail for bus commands: save the request header for a deferred
 * response (when needResponse), then, on success, fan the event out to
 * the registered server/client notifiers under notifier_lock. If no
 * notifier consumed it, respond immediately via bus_responder().
 * NOTE(review): "¬ifier_lock" looks like mojibake for "&notifier_lock";
 * switch headers/closing braces are elided in this chunk.
 */
900 bus_epilog(u32 busNo,
901 u32 cmd, struct controlvm_message_header *msgHdr,
902 int response, BOOL needResponse)
904 BOOL notified = FALSE;
906 struct visorchipset_bus_info *pBusInfo = findbus(&bus_info_list, busNo);
912 memcpy(&pBusInfo->pending_msg_hdr, msgHdr,
913 sizeof(struct controlvm_message_header));
915 pBusInfo->pending_msg_hdr.id = CONTROLVM_INVALID;
917 down(¬ifier_lock);
918 if (response == CONTROLVM_RESP_SUCCESS) {
920 case CONTROLVM_BUS_CREATE:
921 /* We can't tell from the bus_create
922 * information which of our 2 bus flavors the
923 * devices on this bus will ultimately end up.
924 * FORTUNATELY, it turns out it is harmless to
925 * send the bus_create to both of them. We can
926 * narrow things down a little bit, though,
927 * because we know: - BusDev_Server can handle
928 * either server or client devices
929 * - BusDev_Client can handle ONLY client
931 if (BusDev_Server_Notifiers.bus_create) {
932 (*BusDev_Server_Notifiers.bus_create) (busNo);
935 if ((!pBusInfo->flags.server) /*client */ &&
936 BusDev_Client_Notifiers.bus_create) {
937 (*BusDev_Client_Notifiers.bus_create) (busNo);
941 case CONTROLVM_BUS_DESTROY:
942 if (BusDev_Server_Notifiers.bus_destroy) {
943 (*BusDev_Server_Notifiers.bus_destroy) (busNo);
946 if ((!pBusInfo->flags.server) /*client */ &&
947 BusDev_Client_Notifiers.bus_destroy) {
948 (*BusDev_Client_Notifiers.bus_destroy) (busNo);
955 /* The callback function just called above is responsible
956 * for calling the appropriate visorchipset_busdev_responders
957 * function, which will call bus_responder()
961 bus_responder(cmd, busNo, response);
/*
 * Common tail for device commands. Selects the server or client notifier
 * set (for_visorbus), saves the header for a deferred response, then on
 * success dispatches create/destroy directly and maps CHANGESTATE onto
 * resume (running), pause (standby), or — for the diagpool device in the
 * paused state — a KOBJ_ONLINE uevent that triggers diag_shutdown.sh.
 */
966 device_epilog(u32 busNo, u32 devNo, struct spar_segment_state state, u32 cmd,
967 struct controlvm_message_header *msgHdr, int response,
968 BOOL needResponse, BOOL for_visorbus)
970 struct visorchipset_busdev_notifiers *notifiers = NULL;
971 BOOL notified = FALSE;
973 struct visorchipset_device_info *pDevInfo =
974 finddevice(&dev_info_list, busNo, devNo);
976 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
984 notifiers = &BusDev_Server_Notifiers;
986 notifiers = &BusDev_Client_Notifiers;
988 memcpy(&pDevInfo->pending_msg_hdr, msgHdr,
989 sizeof(struct controlvm_message_header));
991 pDevInfo->pending_msg_hdr.id = CONTROLVM_INVALID;
993 down(¬ifier_lock);
996 case CONTROLVM_DEVICE_CREATE:
997 if (notifiers->device_create) {
998 (*notifiers->device_create) (busNo, devNo);
1002 case CONTROLVM_DEVICE_CHANGESTATE:
1003 /* ServerReady / ServerRunning / SegmentStateRunning */
1004 if (state.alive == segment_state_running.alive &&
1006 segment_state_running.operating) {
1007 if (notifiers->device_resume) {
1008 (*notifiers->device_resume) (busNo,
1013 /* ServerNotReady / ServerLost / SegmentStateStandby */
1014 else if (state.alive == segment_state_standby.alive &&
1016 segment_state_standby.operating) {
1017 /* technically this is standby case
1018 * where server is lost
1020 if (notifiers->device_pause) {
1021 (*notifiers->device_pause) (busNo,
1025 } else if (state.alive == segment_state_paused.alive &&
1027 segment_state_paused.operating) {
1028 /* this is lite pause where channel is
1029 * still valid just 'pause' of it
1031 if (busNo == g_diagpool_bus_no &&
1032 devNo == g_diagpool_dev_no) {
1033 /* this will trigger the
1034 * diag_shutdown.sh script in
1035 * the visorchipset hotplug */
1037 (&Visorchipset_platform_device.dev.
1038 kobj, KOBJ_ONLINE, envp);
1042 case CONTROLVM_DEVICE_DESTROY:
1043 if (notifiers->device_destroy) {
1044 (*notifiers->device_destroy) (busNo, devNo);
1051 /* The callback function just called above is responsible
1052 * for calling the appropriate visorchipset_busdev_responders
1053 * function, which will call device_responder()
1057 device_responder(cmd, busNo, devNo, response);
/*
 * Handle CONTROLVM_BUS_CREATE: reject duplicates, allocate and populate a
 * visorchipset_bus_info (channel address/size/type/instance from the
 * message), add it to bus_info_list, and finish via bus_epilog().
 * NOTE(review): goto-cleanup and closing-brace lines are elided here.
 */
1062 bus_create(struct controlvm_message *inmsg)
1064 struct controlvm_message_packet *cmd = &inmsg->cmd;
1065 ulong busNo = cmd->create_bus.bus_no;
1066 int rc = CONTROLVM_RESP_SUCCESS;
1067 struct visorchipset_bus_info *pBusInfo = NULL;
1070 pBusInfo = findbus(&bus_info_list, busNo);
1071 if (pBusInfo && (pBusInfo->state.created == 1)) {
1072 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1073 POSTCODE_SEVERITY_ERR);
1074 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1077 pBusInfo = kzalloc(sizeof(struct visorchipset_bus_info), GFP_KERNEL);
1078 if (pBusInfo == NULL) {
1079 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1080 POSTCODE_SEVERITY_ERR);
1081 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1085 INIT_LIST_HEAD(&pBusInfo->entry);
1086 pBusInfo->bus_no = busNo;
1087 pBusInfo->dev_no = cmd->create_bus.dev_count;
1089 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1091 if (inmsg->hdr.flags.test_message == 1)
1092 pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1094 pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1096 pBusInfo->flags.server = inmsg->hdr.flags.server;
1097 pBusInfo->chan_info.channel_addr = cmd->create_bus.channel_addr;
1098 pBusInfo->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1099 pBusInfo->chan_info.channel_type_uuid =
1100 cmd->create_bus.bus_data_type_uuid;
1101 pBusInfo->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
1103 list_add(&pBusInfo->entry, &bus_info_list);
1105 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1108 bus_epilog(busNo, CONTROLVM_BUS_CREATE, &inmsg->hdr,
1109 rc, inmsg->hdr.flags.response_expected == 1);
/* Handle CONTROLVM_BUS_DESTROY: validate the bus exists and was created,
 * then finish via bus_epilog() with the resulting rc. */
1113 bus_destroy(struct controlvm_message *inmsg)
1115 struct controlvm_message_packet *cmd = &inmsg->cmd;
1116 ulong busNo = cmd->destroy_bus.bus_no;
1117 struct visorchipset_bus_info *pBusInfo;
1118 int rc = CONTROLVM_RESP_SUCCESS;
1120 pBusInfo = findbus(&bus_info_list, busNo);
1122 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1125 if (pBusInfo->state.created == 0) {
1126 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1131 bus_epilog(busNo, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
1132 rc, inmsg->hdr.flags.response_expected == 1);
/*
 * Handle CONTROLVM_BUS_CONFIGURE: validate the bus (exists, created, no
 * other command pending), then record the guest partition handle, the
 * partition uuid, and the bus name parsed from the message payload.
 */
1136 bus_configure(struct controlvm_message *inmsg,
1137 struct parser_context *parser_ctx)
1139 struct controlvm_message_packet *cmd = &inmsg->cmd;
1140 ulong busNo = cmd->configure_bus.bus_no;
1141 struct visorchipset_bus_info *pBusInfo = NULL;
1142 int rc = CONTROLVM_RESP_SUCCESS;
1145 busNo = cmd->configure_bus.bus_no;
1146 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1148 pBusInfo = findbus(&bus_info_list, busNo);
1150 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1151 POSTCODE_SEVERITY_ERR);
1152 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1155 if (pBusInfo->state.created == 0) {
1156 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1157 POSTCODE_SEVERITY_ERR);
1158 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1161 /* TBD - add this check to other commands also... */
1162 if (pBusInfo->pending_msg_hdr.id != CONTROLVM_INVALID) {
1163 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1164 POSTCODE_SEVERITY_ERR);
1165 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1169 pBusInfo->partition_handle = cmd->configure_bus.guest_handle;
1170 pBusInfo->partition_uuid = parser_id_get(parser_ctx);
1171 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1172 pBusInfo->name = parser_string_get(parser_ctx);
1174 visorchannel_uuid_id(&pBusInfo->partition_uuid, s);
1175 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1177 bus_epilog(busNo, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
1178 rc, inmsg->hdr.flags.response_expected == 1);
/*
 * Handle a CONTROLVM_DEVICE_CREATE message: allocate and populate a
 * visorchipset_device_info for (busNo, devNo), link it into dev_info_list,
 * remember the DiagPool channel location if applicable, and respond via
 * device_epilog().
 * NOTE(review): elided listing — error-path braces/gotos are not visible.
 */
1182 my_device_create(struct controlvm_message *inmsg)
1184 struct controlvm_message_packet *cmd = &inmsg->cmd;
1185 ulong busNo = cmd->create_device.bus_no;
1186 ulong devNo = cmd->create_device.dev_no;
1187 struct visorchipset_device_info *pDevInfo = NULL;
1188 struct visorchipset_bus_info *pBusInfo = NULL;
1189 int rc = CONTROLVM_RESP_SUCCESS;
/* duplicate create for an existing, created device is "already done" */
1191 pDevInfo = finddevice(&dev_info_list, busNo, devNo);
1192 if (pDevInfo && (pDevInfo->state.created == 1)) {
1193 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1194 POSTCODE_SEVERITY_ERR);
1195 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
/* the owning bus must exist and have been created */
1198 pBusInfo = findbus(&bus_info_list, busNo);
1200 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1201 POSTCODE_SEVERITY_ERR);
1202 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1205 if (pBusInfo->state.created == 0) {
1206 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1207 POSTCODE_SEVERITY_ERR);
1208 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1211 pDevInfo = kzalloc(sizeof(struct visorchipset_device_info), GFP_KERNEL);
1212 if (pDevInfo == NULL) {
1213 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1214 POSTCODE_SEVERITY_ERR);
1215 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1219 INIT_LIST_HEAD(&pDevInfo->entry);
1220 pDevInfo->bus_no = busNo;
1221 pDevInfo->dev_no = devNo;
1222 pDevInfo->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1223 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
1224 POSTCODE_SEVERITY_INFO);
/* test messages carry addresses in OS-local memory, not physical memory */
1226 if (inmsg->hdr.flags.test_message == 1)
1227 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1229 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1230 pDevInfo->chan_info.channel_addr = cmd->create_device.channel_addr;
1231 pDevInfo->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1232 pDevInfo->chan_info.channel_type_uuid =
1233 cmd->create_device.data_type_uuid;
1234 pDevInfo->chan_info.intr = cmd->create_device.intr;
1235 list_add(&pDevInfo->entry, &dev_info_list);
1236 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, devNo, busNo,
1237 POSTCODE_SEVERITY_INFO);
1239 /* get the bus and devNo for DiagPool channel */
1241 is_diagpool_channel(pDevInfo->chan_info.channel_type_uuid)) {
1242 g_diagpool_bus_no = busNo;
1243 g_diagpool_dev_no = devNo;
/* device_epilog() responds and notifies the visorbus driver */
1245 device_epilog(busNo, devNo, segment_state_running,
1246 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
1247 inmsg->hdr.flags.response_expected == 1,
1248 FOR_VISORBUS(pDevInfo->chan_info.channel_type_uuid));
/*
 * Handle a CONTROLVM_DEVICE_CHANGESTATE message for a virtual device:
 * validate the device then forward the requested segment state through
 * device_epilog(). (Physical devices are routed to parahotplug instead —
 * see handle_command().)
 * NOTE(review): elided listing — some lines are missing between the
 * numbered lines below.
 */
1252 my_device_changestate(struct controlvm_message *inmsg)
1254 struct controlvm_message_packet *cmd = &inmsg->cmd;
1255 ulong busNo = cmd->device_change_state.bus_no;
1256 ulong devNo = cmd->device_change_state.dev_no;
1257 struct spar_segment_state state = cmd->device_change_state.state;
1258 struct visorchipset_device_info *pDevInfo = NULL;
1259 int rc = CONTROLVM_RESP_SUCCESS;
/* device must exist and have completed DEVICE_CREATE */
1261 pDevInfo = finddevice(&dev_info_list, busNo, devNo);
1263 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1264 POSTCODE_SEVERITY_ERR);
1265 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1268 if (pDevInfo->state.created == 0) {
1269 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1270 POSTCODE_SEVERITY_ERR);
1271 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
/* only notify the driver when validation succeeded */
1274 if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1275 device_epilog(busNo, devNo, state, CONTROLVM_DEVICE_CHANGESTATE,
1277 inmsg->hdr.flags.response_expected == 1,
1279 pDevInfo->chan_info.channel_type_uuid));
/*
 * Handle a CONTROLVM_DEVICE_DESTROY message: validate the device then let
 * device_epilog() respond and notify the driver (destroy is modeled as a
 * state change while segment_state_running).
 * NOTE(review): elided listing — some lines are missing between the
 * numbered lines below.
 */
1283 my_device_destroy(struct controlvm_message *inmsg)
1285 struct controlvm_message_packet *cmd = &inmsg->cmd;
1286 ulong busNo = cmd->destroy_device.bus_no;
1287 ulong devNo = cmd->destroy_device.dev_no;
1288 struct visorchipset_device_info *pDevInfo = NULL;
1289 int rc = CONTROLVM_RESP_SUCCESS;
1291 pDevInfo = finddevice(&dev_info_list, busNo, devNo);
1293 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
/* destroying a device that was never created counts as already done */
1296 if (pDevInfo->state.created == 0) {
1297 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1301 if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1302 device_epilog(busNo, devNo, segment_state_running,
1303 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
1304 inmsg->hdr.flags.response_expected == 1,
1306 pDevInfo->chan_info.channel_type_uuid));
1309 /* When provided with the physical address of the controlvm channel
1310 * (phys_addr), the offset to the payload area we need to manage
1311 * (offset), and the size of this payload area (bytes), fills in the
1312 * controlvm_payload_info struct. Returns TRUE for success or FALSE
 * for failure (comment truncated in this excerpt).
 * NOTE(review): elided listing — goto/label lines are missing below.
1316 initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
1317 struct controlvm_payload_info *info)
1319 u8 __iomem *payload = NULL;
1320 int rc = CONTROLVM_RESP_SUCCESS;
/* presumably info == NULL check — condition line elided */
1323 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1326 memset(info, 0, sizeof(struct controlvm_payload_info));
1327 if ((offset == 0) || (bytes == 0)) {
1328 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
/* map the payload region with a cacheable ioremap */
1331 payload = ioremap_cache(phys_addr + offset, bytes);
1332 if (payload == NULL) {
1333 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1337 info->offset = offset;
1338 info->bytes = bytes;
1339 info->ptr = payload;
/* error cleanup: unmap the payload if it was mapped (body elided) */
1343 if (payload != NULL) {
/* Undo initialize_controlvm_payload_info(): unmap the payload area
 * (unmap call elided from this excerpt) and zero the bookkeeping struct.
 */
1352 destroy_controlvm_payload_info(struct controlvm_payload_info *info)
1354 if (info->ptr != NULL) {
1358 memset(info, 0, sizeof(struct controlvm_payload_info));
/*
 * Read the request-payload offset and size out of the controlvm channel
 * header, then map the payload area via initialize_controlvm_payload_info().
 * Bails out (posting an error POSTCODE) if either channel read fails.
 */
1362 initialize_controlvm_payload(void)
1364 HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
1365 u64 payloadOffset = 0;
1366 u32 payloadBytes = 0;
1368 if (visorchannel_read(controlvm_channel,
1369 offsetof(struct spar_controlvm_channel_protocol,
1370 request_payload_offset),
1371 &payloadOffset, sizeof(payloadOffset)) < 0) {
1372 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1373 POSTCODE_SEVERITY_ERR);
1376 if (visorchannel_read(controlvm_channel,
1377 offsetof(struct spar_controlvm_channel_protocol,
1378 request_payload_bytes),
1379 &payloadBytes, sizeof(payloadBytes)) < 0) {
1380 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1381 POSTCODE_SEVERITY_ERR);
1384 initialize_controlvm_payload_info(phys_addr,
1385 payloadOffset, payloadBytes,
1386 &ControlVm_payload_info);
1389 /* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1390 * Returns CONTROLVM_RESP_xxx code.
 */
1393 visorchipset_chipset_ready(void)
1395 kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
1396 return CONTROLVM_RESP_SUCCESS;
1398 EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
/* Emit a KOBJ_CHANGE uevent with SPARSP_SELFTEST=1 so user space can run
 * the chipset self-test. Always returns CONTROLVM_RESP_SUCCESS.
 */
1401 visorchipset_chipset_selftest(void)
1403 char env_selftest[20];
1404 char *envp[] = { env_selftest, NULL };
1406 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1407 kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1409 return CONTROLVM_RESP_SUCCESS;
1411 EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1413 /* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1414 * Returns CONTROLVM_RESP_xxx code.
 */
1417 visorchipset_chipset_notready(void)
1419 kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1420 return CONTROLVM_RESP_SUCCESS;
1422 EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
/*
 * CONTROLVM_CHIPSET_READY handler. Normally responds immediately; when the
 * holdchipsetready module param is set, the response is deferred — the
 * header is stashed in g_chipset_msg_hdr and sent later from
 * controlvm_periodic_work() once check_chipset_events() reports that user
 * space signalled both chipset events.
 */
1425 chipset_ready(struct controlvm_message_header *msgHdr)
1427 int rc = visorchipset_chipset_ready();
/* NOTE(review): the action on failure here is elided from this excerpt */
1429 if (rc != CONTROLVM_RESP_SUCCESS)
1431 if (msgHdr->flags.response_expected && !visorchipset_holdchipsetready)
1432 controlvm_respond(msgHdr, rc);
1433 if (msgHdr->flags.response_expected && visorchipset_holdchipsetready) {
1434 /* Send CHIPSET_READY response when all modules have been loaded
1435 * and disks mounted for the partition
 */
1437 g_chipset_msg_hdr = *msgHdr;
/* CONTROLVM_CHIPSET_SELFTEST handler: kick off the self-test uevent and
 * respond if the sender expects a response.
 */
1442 chipset_selftest(struct controlvm_message_header *msgHdr)
1444 int rc = visorchipset_chipset_selftest();
/* NOTE(review): the action on failure here is elided from this excerpt */
1446 if (rc != CONTROLVM_RESP_SUCCESS)
1448 if (msgHdr->flags.response_expected)
1449 controlvm_respond(msgHdr, rc);
/* CONTROLVM_CHIPSET_STOP handler: send the offline uevent and respond if
 * the sender expects a response.
 */
1453 chipset_notready(struct controlvm_message_header *msgHdr)
1455 int rc = visorchipset_chipset_notready();
/* NOTE(review): the action on failure here is elided from this excerpt */
1457 if (rc != CONTROLVM_RESP_SUCCESS)
1459 if (msgHdr->flags.response_expected)
1460 controlvm_respond(msgHdr, rc);
1463 /* This is your "one-stop" shop for grabbing the next message from the
1464 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
 * NOTE(review): return statements are elided from this excerpt; the visible
 * logic dequeues into *msg and then inspects the test_message flag.
 */
1467 read_controlvm_event(struct controlvm_message *msg)
1469 if (visorchannel_signalremove(controlvm_channel,
1470 CONTROLVM_QUEUE_EVENT, msg)) {
1472 if (msg->hdr.flags.test_message == 1)
1480 * The general parahotplug flow works as follows. The visorchipset
1481 * driver receives a DEVICE_CHANGESTATE message from Command
1482 * specifying a physical device to enable or disable. The CONTROLVM
1483 * message handler calls parahotplug_process_message, which then adds
1484 * the message to a global list and kicks off a udev event which
1485 * causes a user level script to enable or disable the specified
1486 * device. The udev script then writes to
1487 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1488 * to get called, at which point the appropriate CONTROLVM message is
1489 * retrieved from the list and responded to.
/* How long a queued parahotplug request may wait for the udev script
 * before it is timed out by parahotplug_process_list().
 */
1492 #define PARAHOTPLUG_TIMEOUT_MS 2000
1495 * Generate unique int to match an outstanding CONTROLVM message with a
1496 * udev script /proc response
 */
1499 parahotplug_next_id(void)
1501 static atomic_t id = ATOMIC_INIT(0);
1503 return atomic_inc_return(&id);
/*
1507 * Returns the time (in jiffies) when a CONTROLVM message on the list
1508 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
 */
1510 static unsigned long
1511 parahotplug_next_expiration(void)
1513 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
/*
1517 * Create a parahotplug_request, which is basically a wrapper for a
1518 * CONTROLVM_MESSAGE that we can stick on a list
 * Caller owns the returned request and must free it via
 * parahotplug_request_destroy(). Returns NULL on allocation failure
 * (the NULL check line is elided from this excerpt).
 */
1520 static struct parahotplug_request *
1521 parahotplug_request_create(struct controlvm_message *msg)
1523 struct parahotplug_request *req;
/* __GFP_NORETRY: allocation is allowed to fail rather than invoke the OOM killer */
1525 req = kmalloc(sizeof(*req), GFP_KERNEL|__GFP_NORETRY);
1529 req->id = parahotplug_next_id();
1530 req->expiration = parahotplug_next_expiration();
/*
1537 * Free a parahotplug_request.
 */
1540 parahotplug_request_destroy(struct parahotplug_request *req)
/*
1546 * Cause uevent to run the user level script to do the disable/enable
1547 * specified in (the CONTROLVM message in) the specified
1548 * parahotplug_request
 * The script receives the request id plus the PCI bus/device/function
 * decoded from the message as SPAR_PARAHOTPLUG_* environment variables.
 */
1551 parahotplug_request_kickoff(struct parahotplug_request *req)
1553 struct controlvm_message_packet *cmd = &req->msg.cmd;
1554 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1557 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1560 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1561 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1562 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1563 cmd->device_change_state.state.active);
1564 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1565 cmd->device_change_state.bus_no);
/* dev_no packs PCI device (upper bits) and function (low 3 bits) */
1566 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1567 cmd->device_change_state.dev_no >> 3);
1568 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1569 cmd->device_change_state.dev_no & 0x7);
1571 kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
/*
1576 * Remove any request from the list that's been on there too long and
1577 * respond with an error.
 * Called periodically from controlvm_periodic_work(); requests that user
 * space never answered get a DEVICE_UDEV_TIMEOUT response.
 */
1580 parahotplug_process_list(void)
1582 struct list_head *pos = NULL;
1583 struct list_head *tmp = NULL;
1585 spin_lock(&Parahotplug_request_list_lock);
/* _safe variant: entries may be removed while iterating */
1587 list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1588 struct parahotplug_request *req =
1589 list_entry(pos, struct parahotplug_request, list);
1590 if (time_after_eq(jiffies, req->expiration)) {
1592 if (req->msg.hdr.flags.response_expected)
1593 controlvm_respond_physdev_changestate(
1595 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1596 req->msg.cmd.device_change_state.state);
1597 parahotplug_request_destroy(req);
1601 spin_unlock(&Parahotplug_request_list_lock);
/*
1605 * Called from the /proc handler, which means the user script has
1606 * finished the enable/disable. Find the matching identifier, and
1607 * respond to the CONTROLVM message with success.
 * @id:     request id previously handed to the udev script
 * @active: final device state reported by the script (1=enabled, 0=disabled)
 */
1610 parahotplug_request_complete(int id, u16 active)
1612 struct list_head *pos = NULL;
1613 struct list_head *tmp = NULL;
1615 spin_lock(&Parahotplug_request_list_lock);
1617 /* Look for a request matching "id". */
1618 list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1619 struct parahotplug_request *req =
1620 list_entry(pos, struct parahotplug_request, list);
1621 if (req->id == id) {
1622 /* Found a match. Remove it from the list and
 * respond (list_del line elided from this excerpt).
 */
/* drop the lock before responding; the request is already unlinked */
1626 spin_unlock(&Parahotplug_request_list_lock);
1627 req->msg.cmd.device_change_state.state.active = active;
1628 if (req->msg.hdr.flags.response_expected)
1629 controlvm_respond_physdev_changestate(
1630 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1631 req->msg.cmd.device_change_state.state);
1632 parahotplug_request_destroy(req);
/* no match found: fall through and release the lock */
1637 spin_unlock(&Parahotplug_request_list_lock);
/*
1642 * Enables or disables a PCI device by kicking off a udev script
 * Enable requests are acknowledged immediately; disable requests are
 * queued on Parahotplug_request_list and answered only after the udev
 * script reports completion via parahotplug_request_complete().
 */
1645 parahotplug_process_message(struct controlvm_message *inmsg)
1647 struct parahotplug_request *req;
1649 req = parahotplug_request_create(inmsg);
/* NOTE(review): NULL-check of req is elided from this excerpt */
1654 if (inmsg->cmd.device_change_state.state.active) {
1655 /* For enable messages, just respond with success
1656 * right away. This is a bit of a hack, but there are
1657 * issues with the early enable messages we get (with
1658 * either the udev script not detecting that the device
1659 * is up, or not getting called at all). Fortunately
1660 * the messages that get lost don't matter anyway, as
1661 * devices are automatically enabled at
 * boot (comment truncated in this excerpt).
 */
1664 parahotplug_request_kickoff(req);
1665 controlvm_respond_physdev_changestate(&inmsg->hdr,
1666 CONTROLVM_RESP_SUCCESS, inmsg->cmd.
1667 device_change_state.state);
1668 parahotplug_request_destroy(req);
/* else: disable path */
1670 /* For disable messages, add the request to the
1671 * request list before kicking off the udev script. It
1672 * won't get responded to until the script has
1673 * indicated it's done.
 */
1675 spin_lock(&Parahotplug_request_list_lock);
1676 list_add_tail(&(req->list), &Parahotplug_request_list);
1677 spin_unlock(&Parahotplug_request_list_lock);
1679 parahotplug_request_kickoff(req);
1683 /* Process a controlvm message.
 * Returns:
1685 * FALSE - this function will return FALSE only in the case where the
1686 * controlvm message was NOT processed, but processing must be
1687 * retried before reading the next controlvm message; a
1688 * scenario where this can occur is when we need to throttle
1689 * the allocation of memory in which to copy out controlvm
 * message payload data;
1691 * TRUE - processing of the controlvm message completed,
1692 * either successfully or with an error.
 */
1695 handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
1697 struct controlvm_message_packet *cmd = &inmsg.cmd;
1698 u64 parametersAddr = 0;
1699 u32 parametersBytes = 0;
1700 struct parser_context *parser_ctx = NULL;
1701 BOOL isLocalAddr = FALSE;
1702 struct controlvm_message ackmsg;
1704 /* create parsing context if necessary */
1705 isLocalAddr = (inmsg.hdr.flags.test_message == 1);
1706 if (channel_addr == 0)
1708 parametersAddr = channel_addr + inmsg.hdr.payload_vm_offset;
1709 parametersBytes = inmsg.hdr.payload_bytes;
1711 /* Parameter and channel addresses within test messages actually lie
1712 * within our OS-controlled memory. We need to know that, because it
1713 * makes a difference in how we compute the virtual address.
 */
1715 if (parametersAddr != 0 && parametersBytes != 0) {
/* NOTE(review): 'retry' is declared on a line elided from this excerpt */
1719 parser_init_byte_stream(parametersAddr, parametersBytes,
1720 isLocalAddr, &retry);
/* parser allocation failed but is retryable -> caller must re-deliver */
1721 if (!parser_ctx && retry)
/* ACK the message on the channel before dispatching it */
1726 controlvm_init_response(&ackmsg, &inmsg.hdr,
1727 CONTROLVM_RESP_SUCCESS);
1728 if (controlvm_channel)
1729 visorchannel_signalinsert(controlvm_channel,
1730 CONTROLVM_QUEUE_ACK,
/* dispatch on the message id */
1733 switch (inmsg.hdr.id) {
1734 case CONTROLVM_CHIPSET_INIT:
1735 chipset_init(&inmsg);
1737 case CONTROLVM_BUS_CREATE:
1740 case CONTROLVM_BUS_DESTROY:
1741 bus_destroy(&inmsg);
1743 case CONTROLVM_BUS_CONFIGURE:
1744 bus_configure(&inmsg, parser_ctx);
1746 case CONTROLVM_DEVICE_CREATE:
1747 my_device_create(&inmsg);
1749 case CONTROLVM_DEVICE_CHANGESTATE:
/* physical devices go through the parahotplug/udev path */
1750 if (cmd->device_change_state.flags.phys_device) {
1751 parahotplug_process_message(&inmsg);
1753 /* save the hdr and cmd structures for later use */
1754 /* when sending back the response to Command */
1755 my_device_changestate(&inmsg);
1756 g_diag_msg_hdr = inmsg.hdr;
1757 g_devicechangestate_packet = inmsg.cmd;
1761 case CONTROLVM_DEVICE_DESTROY:
1762 my_device_destroy(&inmsg);
1764 case CONTROLVM_DEVICE_CONFIGURE:
1765 /* no op for now, just send a respond that we passed */
1766 if (inmsg.hdr.flags.response_expected)
1767 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1769 case CONTROLVM_CHIPSET_READY:
1770 chipset_ready(&inmsg.hdr);
1772 case CONTROLVM_CHIPSET_SELFTEST:
1773 chipset_selftest(&inmsg.hdr);
1775 case CONTROLVM_CHIPSET_STOP:
1776 chipset_notready(&inmsg.hdr);
/* default: unknown message id */
1779 if (inmsg.hdr.flags.response_expected)
1780 controlvm_respond(&inmsg.hdr,
1781 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
/* tear down the parser context, if one was created */
1785 if (parser_ctx != NULL) {
1786 parser_done(parser_ctx);
/* Ask the hypervisor (via vmcall) for the physical address of the
 * controlvm channel. Failure handling is elided from this excerpt.
 */
1792 static HOSTADDRESS controlvm_get_channel_address(void)
1797 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
/*
 * Main polling loop, run on periodic_controlvm_workqueue. Each pass:
 * drains the RESPONSE queue, processes pending/new controlvm messages
 * (with throttle-and-retry support via ControlVm_Pending_Msg), times out
 * stale parahotplug requests, adjusts the polling rate between FAST and
 * SLOW depending on recent traffic, and re-queues itself.
 */
1804 controlvm_periodic_work(struct work_struct *work)
1806 struct controlvm_message inmsg;
1807 BOOL gotACommand = FALSE;
1808 BOOL handle_command_failed = FALSE;
1809 static u64 Poll_Count;
1811 /* make sure visorbus server is registered for controlvm callbacks */
1812 if (visorchipset_serverregwait && !serverregistered)
1814 /* make sure visorclientbus server is registered for controlvm
 * callbacks
 */
1817 if (visorchipset_clientregwait && !clientregistered)
/* NOTE(review): Poll_Count increment / early-out lines are elided here */
1821 if (Poll_Count >= 250)
1826 /* Check events to determine if response to CHIPSET_READY
 * can be sent (deferred when holdchipsetready is set)
 */
1829 if (visorchipset_holdchipsetready &&
1830 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
1831 if (check_chipset_events() == 1) {
1832 controlvm_respond(&g_chipset_msg_hdr, 0);
1833 clear_chipset_events();
1834 memset(&g_chipset_msg_hdr, 0,
1835 sizeof(struct controlvm_message_header));
/* drain (and discard) anything on the RESPONSE queue */
1839 while (visorchannel_signalremove(controlvm_channel,
1840 CONTROLVM_QUEUE_RESPONSE,
1844 if (ControlVm_Pending_Msg_Valid) {
1845 /* we throttled processing of a prior
1846 * msg, so try to process it again
1847 * rather than reading a new one
 */
1849 inmsg = ControlVm_Pending_Msg;
1850 ControlVm_Pending_Msg_Valid = FALSE;
/* else: read a fresh event off the channel */
1853 gotACommand = read_controlvm_event(&inmsg);
1856 handle_command_failed = FALSE;
1857 while (gotACommand && (!handle_command_failed)) {
1858 most_recent_message_jiffies = jiffies;
1859 if (handle_command(inmsg,
1860 visorchannel_get_physaddr
1861 (controlvm_channel)))
1862 gotACommand = read_controlvm_event(&inmsg);
1864 /* this is a scenario where throttling
1865 * is required, but probably NOT an
1866 * error...; we stash the current
1867 * controlvm msg so we will attempt to
1868 * reprocess it on our next loop
 */
1870 handle_command_failed = TRUE;
1871 ControlVm_Pending_Msg = inmsg;
1872 ControlVm_Pending_Msg_Valid = TRUE;
1876 /* parahotplug_worker */
1877 parahotplug_process_list();
/* adjust polling rate: slow down after MIN_IDLE_SECONDS of silence */
1881 if (time_after(jiffies,
1882 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
1883 /* it's been longer than MIN_IDLE_SECONDS since we
1884 * processed our last controlvm message; slow down the
 * polling
 */
1887 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1888 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1890 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1891 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
/* re-arm ourselves */
1894 queue_delayed_work(periodic_controlvm_workqueue,
1895 &periodic_controlvm_work, poll_jiffies);
/*
 * Crash-kernel startup path (used instead of controlvm_periodic_work when
 * booting with crash_kernel=1): replay the CHIPSET_INIT message, then read
 * the saved crash-time BUS_CREATE and DEVICE_CREATE messages out of the
 * controlvm channel and re-issue them so the dump storage device comes up.
 */
1899 setup_crash_devices_work_queue(struct work_struct *work)
1902 struct controlvm_message localCrashCreateBusMsg;
1903 struct controlvm_message localCrashCreateDevMsg;
1904 struct controlvm_message msg;
1905 u32 localSavedCrashMsgOffset;
1906 u16 localSavedCrashMsgCount;
1908 /* make sure visorbus server is registered for controlvm callbacks */
1909 if (visorchipset_serverregwait && !serverregistered)
1912 /* make sure visorclientbus server is registered for controlvm
 * callbacks
 */
1915 if (visorchipset_clientregwait && !clientregistered)
1918 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1920 /* send init chipset msg */
1921 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
1922 msg.cmd.init_chipset.bus_count = 23;
1923 msg.cmd.init_chipset.switch_count = 0;
1927 /* get saved message count */
1928 if (visorchannel_read(controlvm_channel,
1929 offsetof(struct spar_controlvm_channel_protocol,
1930 saved_crash_message_count),
1931 &localSavedCrashMsgCount, sizeof(u16)) < 0) {
1932 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1933 POSTCODE_SEVERITY_ERR);
/* exactly CONTROLVM_CRASHMSG_MAX saved messages are expected */
1937 if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
1938 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
1939 localSavedCrashMsgCount,
1940 POSTCODE_SEVERITY_ERR);
1944 /* get saved crash message offset */
1945 if (visorchannel_read(controlvm_channel,
1946 offsetof(struct spar_controlvm_channel_protocol,
1947 saved_crash_message_offset),
1948 &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
1949 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1950 POSTCODE_SEVERITY_ERR);
1954 /* read create device message for storage bus offset */
1955 if (visorchannel_read(controlvm_channel,
1956 localSavedCrashMsgOffset,
1957 &localCrashCreateBusMsg,
1958 sizeof(struct controlvm_message)) < 0) {
1959 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1960 POSTCODE_SEVERITY_ERR);
1964 /* read create device message for storage device */
1965 if (visorchannel_read(controlvm_channel,
1966 localSavedCrashMsgOffset +
1967 sizeof(struct controlvm_message),
1968 &localCrashCreateDevMsg,
1969 sizeof(struct controlvm_message)) < 0) {
1970 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1971 POSTCODE_SEVERITY_ERR);
1975 /* reuse IOVM create bus message */
1976 if (localCrashCreateBusMsg.cmd.create_bus.channel_addr != 0)
1977 bus_create(&localCrashCreateBusMsg);
/* else: saved bus message had no channel address */
1979 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1980 POSTCODE_SEVERITY_ERR);
1984 /* reuse create device message for storage device */
1985 if (localCrashCreateDevMsg.cmd.create_device.channel_addr != 0)
1986 my_device_create(&localCrashCreateDevMsg);
1988 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
1989 POSTCODE_SEVERITY_ERR);
1992 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
/* fall back to slow polling and re-queue the periodic work */
1997 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1999 queue_delayed_work(periodic_controlvm_workqueue,
2000 &periodic_controlvm_work, poll_jiffies);
/* Thin callback wrappers handed to the bus driver: each forwards the
 * driver's completion status to the matching CONTROLVM responder.
 */
2004 bus_create_response(ulong busNo, int response)
2006 bus_responder(CONTROLVM_BUS_CREATE, busNo, response);
2010 bus_destroy_response(ulong busNo, int response)
2012 bus_responder(CONTROLVM_BUS_DESTROY, busNo, response);
2016 device_create_response(ulong busNo, ulong devNo, int response)
2018 device_responder(CONTROLVM_DEVICE_CREATE, busNo, devNo, response);
2022 device_destroy_response(ulong busNo, ulong devNo, int response)
2024 device_responder(CONTROLVM_DEVICE_DESTROY, busNo, devNo, response);
/* pause -> standby, resume -> running segment state */
2028 visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
2031 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2032 bus_no, dev_no, response,
2033 segment_state_standby);
2035 EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
2038 device_resume_response(ulong busNo, ulong devNo, int response)
2040 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2041 busNo, devNo, response,
2042 segment_state_running);
/* Copy the tracked info for bus_no into *bus_info. The failure branch
 * (findbus() returning NULL) is elided from this excerpt.
 */
2046 visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
2048 void *p = findbus(&bus_info_list, bus_no);
2052 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2055 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
/* Stash an opaque bus-driver cookie on the tracked bus entry. The failure
 * branch (findbus() returning NULL) is elided from this excerpt.
 */
2058 visorchipset_set_bus_context(ulong bus_no, void *context)
2060 struct visorchipset_bus_info *p = findbus(&bus_info_list, bus_no);
2064 p->bus_driver_context = context;
2067 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
/* Copy the tracked info for (bus_no, dev_no) into *dev_info. The failure
 * branch (finddevice() returning NULL) is elided from this excerpt.
 */
2070 visorchipset_get_device_info(ulong bus_no, ulong dev_no,
2071 struct visorchipset_device_info *dev_info)
2073 void *p = finddevice(&dev_info_list, bus_no, dev_no);
2077 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2080 EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
/* Stash an opaque driver cookie on the tracked device entry. The failure
 * branch (finddevice() returning NULL) is elided from this excerpt.
 */
2083 visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
2085 struct visorchipset_device_info *p =
2086 finddevice(&dev_info_list, bus_no, dev_no);
2090 p->bus_driver_context = context;
2093 EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2095 /* Generic wrapper function for allocating memory from a kmem_cache pool.
 * NOTE(review): parameter list and the gfp setup lines are elided from
 * this excerpt; 'gfp' is presumably derived from ok_to_block.
 */
2098 visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
2108 /* __GFP_NORETRY means "ok to fail", meaning
2109 * kmem_cache_alloc() can return NULL, implying the caller CAN
2110 * cope with failure. If you do NOT specify __GFP_NORETRY,
2111 * Linux will go to extreme measures to get memory for you
2112 * (like, invoke oom killer), which will probably cripple the
 * system.
 */
2115 gfp |= __GFP_NORETRY;
2116 p = kmem_cache_alloc(pool, gfp);
/* track outstanding buffers for debugging/leak detection */
2120 atomic_inc(&Visorchipset_cache_buffers_in_use);
2124 /* Generic wrapper function for freeing memory from a kmem_cache pool.
 * Decrements the outstanding-buffer counter kept by visorchipset_cache_alloc().
 */
2127 visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2132 atomic_dec(&Visorchipset_cache_buffers_in_use);
2133 kmem_cache_free(pool, p);
/* sysfs store: user space writes "CALLHOMEDISK_MOUNTED" or "MODULES_LOADED"
 * here to set the corresponding chipset_events[] flag; once both are set,
 * the deferred CHIPSET_READY response is released (see chipset_ready()).
 * NOTE(review): the msgtype buffer declaration is elided from this excerpt.
 */
2136 static ssize_t chipsetready_store(struct device *dev,
2137 struct device_attribute *attr, const char *buf, size_t count)
2141 if (sscanf(buf, "%63s", msgtype) != 1)
2144 if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
2145 chipset_events[0] = 1;
2147 } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
2148 chipset_events[1] = 1;
2154 /* The parahotplug/devicedisabled interface gets called by our support script
2155 * when an SR-IOV device has been shut down. The ID is passed to the script
2156 * and then passed back when the device has been removed.
 */
2158 static ssize_t devicedisabled_store(struct device *dev,
2159 struct device_attribute *attr, const char *buf, size_t count)
2163 if (kstrtouint(buf, 10, &id) != 0)
/* active=0: report the device as disabled */
2166 parahotplug_request_complete(id, 0);
2170 /* The parahotplug/deviceenabled interface gets called by our support script
2171 * when an SR-IOV device has been recovered. The ID is passed to the script
2172 * and then passed back when the device has been brought back up.
 */
2174 static ssize_t deviceenabled_store(struct device *dev,
2175 struct device_attribute *attr, const char *buf, size_t count)
2179 if (kstrtouint(buf, 10, &id) != 0)
/* active=1: report the device as re-enabled */
2182 parahotplug_request_complete(id, 1);
/*
 * Module init: locate and attach to the controlvm channel, set up the
 * char-device interface, the message kmem_cache pool, and the periodic
 * controlvm work queue (or the crash-kernel replay worker when booting a
 * crash kernel), then register the platform device used for uevents.
 * NOTE(review): elided listing — several error-path lines and variable
 * declarations are missing between the numbered lines below.
 */
2187 visorchipset_init(void)
/* only meaningful on the Unisys s-Par platform */
2192 if (!unisys_spar_platform)
2195 memset(&BusDev_Server_Notifiers, 0, sizeof(BusDev_Server_Notifiers));
2196 memset(&BusDev_Client_Notifiers, 0, sizeof(BusDev_Client_Notifiers));
2197 memset(&ControlVm_payload_info, 0, sizeof(ControlVm_payload_info));
2198 memset(&LiveDump_info, 0, sizeof(LiveDump_info));
2199 atomic_set(&LiveDump_info.buffers_in_use, 0);
/* the vnic loopback test mode is not supported here */
2201 if (visorchipset_testvnic) {
2202 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
/* discover the controlvm channel's physical address and attach to it */
2207 addr = controlvm_get_channel_address();
2210 visorchannel_create_with_lock
2212 sizeof(struct spar_controlvm_channel_protocol),
2213 spar_controlvm_channel_protocol_uuid);
2214 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2215 visorchannel_get_header(controlvm_channel))) {
2216 initialize_controlvm_payload();
/* channel header validation failed: tear the channel back down */
2218 visorchannel_destroy(controlvm_channel);
2219 controlvm_channel = NULL;
2226 MajorDev = MKDEV(visorchipset_major, 0);
2227 rc = visorchipset_file_init(MajorDev, &controlvm_channel);
2229 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2233 memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
2235 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2237 memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
2239 Putfile_buffer_list_pool =
2240 kmem_cache_create(Putfile_buffer_list_pool_name,
2241 sizeof(struct putfile_buffer_entry),
2242 0, SLAB_HWCACHE_ALIGN, NULL);
2243 if (!Putfile_buffer_list_pool) {
2244 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2248 if (!visorchipset_disable_controlvm) {
2249 /* if booting in a crash kernel */
2250 if (visorchipset_crash_kernel)
2251 INIT_DELAYED_WORK(&periodic_controlvm_work,
2252 setup_crash_devices_work_queue);
2254 INIT_DELAYED_WORK(&periodic_controlvm_work,
2255 controlvm_periodic_work);
2256 periodic_controlvm_workqueue =
2257 create_singlethread_workqueue("visorchipset_controlvm");
2259 if (periodic_controlvm_workqueue == NULL) {
2260 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
/* start in fast-poll mode; periodic work adjusts the rate later */
2265 most_recent_message_jiffies = jiffies;
2266 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2267 rc = queue_delayed_work(periodic_controlvm_workqueue,
2268 &periodic_controlvm_work, poll_jiffies);
2270 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
/* register the platform device that anchors our uevents and sysfs files */
2277 Visorchipset_platform_device.dev.devt = MajorDev;
2278 if (platform_device_register(&Visorchipset_platform_device) < 0) {
2279 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2283 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
/* cleanup/error exit path */
2287 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2288 POSTCODE_SEVERITY_ERR);
/*
 * Module exit: stop and destroy the periodic controlvm work queue (unless
 * controlvm polling was disabled), release the payload mapping, the
 * kmem_cache pool, tracked bus/device structures, and the channel, then
 * clean up the char-device interface.
 */
2294 visorchipset_exit(void)
2296 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2298 if (visorchipset_disable_controlvm) {
/* else: the work queue was running — flush and tear it down */
2301 cancel_delayed_work(&periodic_controlvm_work);
2302 flush_workqueue(periodic_controlvm_workqueue);
2303 destroy_workqueue(periodic_controlvm_workqueue);
2304 periodic_controlvm_workqueue = NULL;
2305 destroy_controlvm_payload_info(&ControlVm_payload_info);
2307 if (Putfile_buffer_list_pool) {
2308 kmem_cache_destroy(Putfile_buffer_list_pool);
2309 Putfile_buffer_list_pool = NULL;
2312 cleanup_controlvm_structures();
2314 memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
2316 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2318 memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
2320 visorchannel_destroy(controlvm_channel);
2322 visorchipset_file_cleanup();
2323 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
/* Module parameters (all readable via sysfs, S_IRUGO) and module metadata.
 * Note the definitions follow the module_param_named() uses — legal in C
 * because the variables are declared in visorchipset.h included above.
 */
2326 module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2327 MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2328 int visorchipset_testvnic = 0;
2330 module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2331 MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2332 int visorchipset_testvnicclient = 0;
2334 module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2335 MODULE_PARM_DESC(visorchipset_testmsg,
2336 "1 to manufacture the chipset, bus, and switch messages");
2337 int visorchipset_testmsg = 0;
2339 module_param_named(major, visorchipset_major, int, S_IRUGO);
2340 MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
2341 int visorchipset_major = 0;
/* NOTE(review): the PARM_DESC name "serverreqwait" does not match the
 * variable "serverregwait" — looks like a typo, but the string is part of
 * runtime metadata so it is left untouched here.
 */
2343 module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
2344 MODULE_PARM_DESC(visorchipset_serverreqwait,
2345 "1 to have the module wait for the visor bus to register");
2346 int visorchipset_serverregwait = 0; /* default is off */
2347 module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2348 MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2349 int visorchipset_clientregwait = 1; /* default is on */
2350 module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2351 MODULE_PARM_DESC(visorchipset_testteardown,
2352 "1 to test teardown of the chipset, bus, and switch");
2353 int visorchipset_testteardown = 0; /* default is off */
2354 module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2356 MODULE_PARM_DESC(visorchipset_disable_controlvm,
2357 "1 to disable polling of controlVm channel");
2358 int visorchipset_disable_controlvm = 0; /* default is off */
2359 module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
2360 MODULE_PARM_DESC(visorchipset_crash_kernel,
2361 "1 means we are running in crash kernel");
2362 int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */
2363 module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2365 MODULE_PARM_DESC(visorchipset_holdchipsetready,
2366 "1 to hold response to CHIPSET_READY");
2367 int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
2368 * response immediately */
2369 module_init(visorchipset_init);
2370 module_exit(visorchipset_exit);
2372 MODULE_AUTHOR("Unisys");
2373 MODULE_LICENSE("GPL");
2374 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2376 MODULE_VERSION(VERSION);