3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
19 #include "visorchipset.h"
20 #include "procobjecttree.h"
21 #include "visorchannel.h"
22 #include "periodic_work.h"
26 #include "controlvmcompletionstatus.h"
27 #include "guestlinuxdebug.h"
29 #include <linux/nls.h>
30 #include <linux/netdevice.h>
31 #include <linux/platform_device.h>
32 #include <linux/uuid.h>
34 #define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
35 #define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
36 * vnic loopback test */
37 #define TEST_VNIC_SWITCHNO 1
38 #define TEST_VNIC_BUSNO 9
40 #define MAX_NAME_SIZE 128
41 #define MAX_IP_SIZE 50
42 #define MAXOUTSTANDINGCHANNELCOMMAND 256
43 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
44 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
46 /* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
47 * we switch to slow polling mode. As soon as we get a controlvm
48 * message, we switch back to fast polling mode.
50 #define MIN_IDLE_SECONDS 10
51 static ulong poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
52 static ulong most_recent_message_jiffies; /* when we got our last
53 * controlvm message */
62 static int serverregistered;
63 static int clientregistered;
65 #define MAX_CHIPSET_EVENTS 2
66 static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
68 static struct delayed_work periodic_controlvm_work;
69 static struct workqueue_struct *periodic_controlvm_workqueue;
70 static DEFINE_SEMAPHORE(notifier_lock);
72 static struct controlvm_message_header g_diag_msg_hdr;
73 static struct controlvm_message_header g_chipset_msg_hdr;
74 static struct controlvm_message_header g_del_dump_msg_hdr;
75 static const uuid_le spar_diag_pool_channel_protocol_uuid =
76 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
77 /* 0xffffff is an invalid Bus/Device number */
78 static ulong g_diagpool_bus_no = 0xffffff;
79 static ulong g_diagpool_dev_no = 0xffffff;
80 static struct controlvm_message_packet g_devicechangestate_packet;
82 /* Only VNIC and VHBA channels are sent to visorclientbus (aka
/* TRUE iff the channel type GUID is VNIC or VHBA -- the only two channel
 * types that are handed off to the visorclientbus ("visorhackbus"). */
85 #define FOR_VISORHACKBUS(channel_type_guid) \
86 (((uuid_le_cmp(channel_type_guid,\
87 spar_vnic_channel_protocol_uuid) == 0) ||\
88 (uuid_le_cmp(channel_type_guid,\
89 spar_vhba_channel_protocol_uuid) == 0)))
/* Every channel that is NOT a visorhackbus channel belongs to visorbus. */
90 #define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
/* TRUE iff the channel carries the s-Par DIAG pool protocol
 * (compared against spar_diag_pool_channel_protocol_uuid above). */
92 #define is_diagpool_channel(channel_type_guid) \
93 (uuid_le_cmp(channel_type_guid,\
94 spar_diag_pool_channel_protocol_uuid) == 0)
96 static LIST_HEAD(bus_info_list);
97 static LIST_HEAD(dev_info_list);
99 static struct visorchannel *controlvm_channel;
101 /* Manages the request payload in the controlvm channel */
/* Describes the request-payload pool mapped in from the controlvm channel.
 * NOTE(review): ptr is __iomem, so it must be accessed with the io accessors
 * (memcpy_fromio() etc.), not dereferenced directly. */
102 static struct controlvm_payload_info {
103 u8 __iomem *ptr; /* pointer to base address of payload pool */
104 u64 offset; /* offset from beginning of controlvm
105 * channel to beginning of payload pool */
106 u32 bytes; /* number of bytes in payload pool */
107 } controlvm_payload_info;
109 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
110 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
/* State for one CAPTURESTATE/GETTEXTDUMP/COMPLETE live-dump conversation:
 * the three saved message headers plus progress bookkeeping.
 * NOTE(review): several members and the closing brace appear elided from this
 * extract (original lines 117-122) -- confirm against the full source. */
112 static struct livedump_info {
113 struct controlvm_message_header dumpcapture_header;
114 struct controlvm_message_header gettextdump_header;
115 struct controlvm_message_header dumpcomplete_header;
116 BOOL gettextdump_outstanding;
119 atomic_t buffers_in_use;
123 /* The following globals are used to handle the scenario where we are unable to
124 * offload the payload from a controlvm message due to memory requirements. In
125 * this scenario, we simply stash the controlvm message, then attempt to
126 * process it again the next time controlvm_periodic_work() runs.
128 static struct controlvm_message ControlVm_Pending_Msg;
129 static BOOL ControlVm_Pending_Msg_Valid = FALSE;
131 /* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
132 * TRANSMIT_FILE PutFile payloads.
134 static struct kmem_cache *Putfile_buffer_list_pool;
135 static const char Putfile_buffer_list_pool_name[] =
136 "controlvm_putfile_buffer_list_pool";
138 /* This identifies a data buffer that has been received via a controlvm messages
139 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
/* One received TRANSMIT_FILE data buffer; queued on a putfile_request's
 * input_buffer_list until a consumer drains it. */
141 struct putfile_buffer_entry {
142 struct list_head next; /* putfile_buffer_entry list */
143 struct parser_context *parser_ctx; /* points to input data buffer */
146 /* List of struct putfile_request *, via next_putfile_request member.
147 * Each entry in this list identifies an outstanding TRANSMIT_FILE
150 static LIST_HEAD(Putfile_request_list);
152 /* This describes a buffer and its current state of transfer (e.g., how many
153 * bytes have already been supplied as putfile data, and how many bytes are
154 * remaining) for a putfile_request.
/* Transfer cursor for the buffer currently being consumed by a
 * putfile_request (which bytes have been handed out, which remain).
 * NOTE(review): the <pnext> member referenced by the comment below appears
 * elided from this extract (original line 160). */
156 struct putfile_active_buffer {
157 /* a payload from a controlvm message, containing a file data buffer */
158 struct parser_context *parser_ctx;
159 /* points within data area of parser_ctx to next byte of data */
161 /* # bytes left from <pnext> to the end of this data buffer */
162 size_t bytes_remaining;
165 #define PUTFILE_REQUEST_SIG 0x0906101302281211
166 /* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
167 * conversation. Structs of this type are dynamically linked into
168 * <Putfile_request_list>.
/* Tracks one complete remote --> local CONTROLVM_TRANSMIT_FILE conversation;
 * instances live on <Putfile_request_list>. */
170 struct putfile_request {
171 u64 sig; /* PUTFILE_REQUEST_SIG */
173 /* header from original TransmitFile request */
174 struct controlvm_message_header controlvm_header;
175 u64 file_request_number; /* from original TransmitFile request */
177 /* link to next struct putfile_request */
178 struct list_head next_putfile_request;
180 /* most-recent sequence number supplied via a controlvm message */
181 u64 data_sequence_number;
183 /* head of putfile_buffer_entry list, which describes the data to be
184 * supplied as putfile data;
185 * - this list is added to when controlvm messages come in that supply
187 * - this list is removed from via the hotplug program that is actually
188 * consuming these buffers to write as file data */
189 struct list_head input_buffer_list;
190 spinlock_t req_list_lock; /* lock for input_buffer_list */
192 /* waiters for input_buffer_list to go non-empty */
193 wait_queue_head_t input_buffer_wq;
195 /* data not yet read within current putfile_buffer_entry */
196 struct putfile_active_buffer active_buf;
198 /* <0 = failed, 0 = in-progress, >0 = successful; */
199 /* note that this must be set with req_list_lock, and if you set <0, */
200 /* it is your responsibility to also free up all of the other objects */
201 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
202 /* before releasing the lock */
203 int completion_status;
206 static atomic_t Visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
/* A queued parahotplug (device enable/disable) request: the original
 * controlvm message plus an expiration time, linked on
 * Parahotplug_request_list under Parahotplug_request_list_lock. */
208 struct parahotplug_request {
209 struct list_head list;
211 unsigned long expiration;
212 struct controlvm_message msg;
215 static LIST_HEAD(Parahotplug_request_list);
216 static DEFINE_SPINLOCK(Parahotplug_request_list_lock); /* lock for above */
217 static void parahotplug_process_list(void);
219 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
220 * CONTROLVM_REPORTEVENT.
222 static struct visorchipset_busdev_notifiers BusDev_Server_Notifiers;
223 static struct visorchipset_busdev_notifiers BusDev_Client_Notifiers;
225 static void bus_create_response(ulong busNo, int response);
226 static void bus_destroy_response(ulong busNo, int response);
227 static void device_create_response(ulong busNo, ulong devNo, int response);
228 static void device_destroy_response(ulong busNo, ulong devNo, int response);
229 static void device_resume_response(ulong busNo, ulong devNo, int response);
/* Responder callbacks handed back to bus drivers through
 * visorchipset_register_busdev_server/client(); each one completes the
 * corresponding controlvm request by calling bus_/device_responder(). */
231 static struct visorchipset_busdev_responders BusDev_Responders = {
232 .bus_create = bus_create_response,
233 .bus_destroy = bus_destroy_response,
234 .device_create = device_create_response,
235 .device_destroy = device_destroy_response,
236 .device_pause = visorchipset_device_pause_response,
237 .device_resume = device_resume_response,
240 /* info for /dev/visorchipset */
241 static dev_t MajorDev = -1; /**< indicates major num for device */
243 /* prototypes for attributes */
244 static ssize_t toolaction_show(struct device *dev,
245 struct device_attribute *attr, char *buf);
246 static ssize_t toolaction_store(struct device *dev,
247 struct device_attribute *attr, const char *buf, size_t count);
248 static DEVICE_ATTR_RW(toolaction);
250 static ssize_t boottotool_show(struct device *dev,
251 struct device_attribute *attr, char *buf);
252 static ssize_t boottotool_store(struct device *dev,
253 struct device_attribute *attr, const char *buf, size_t count);
254 static DEVICE_ATTR_RW(boottotool);
256 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
258 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
259 const char *buf, size_t count);
260 static DEVICE_ATTR_RW(error);
262 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
264 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
265 const char *buf, size_t count);
266 static DEVICE_ATTR_RW(textid);
268 static ssize_t remaining_steps_show(struct device *dev,
269 struct device_attribute *attr, char *buf);
270 static ssize_t remaining_steps_store(struct device *dev,
271 struct device_attribute *attr, const char *buf, size_t count);
272 static DEVICE_ATTR_RW(remaining_steps);
274 static ssize_t chipsetready_store(struct device *dev,
275 struct device_attribute *attr, const char *buf, size_t count);
276 static DEVICE_ATTR_WO(chipsetready);
278 static ssize_t devicedisabled_store(struct device *dev,
279 struct device_attribute *attr, const char *buf, size_t count);
280 static DEVICE_ATTR_WO(devicedisabled);
282 static ssize_t deviceenabled_store(struct device *dev,
283 struct device_attribute *attr, const char *buf, size_t count);
284 static DEVICE_ATTR_WO(deviceenabled);
/* sysfs layout: "install" attributes (toolaction, boottotool, error, textid,
 * remaining_steps), "guest" attributes (chipsetready), and the "parahotplug"
 * group (devicedisabled/deviceenabled), all attached to the platform device.
 * NOTE(review): the NULL array terminators and closing braces appear elided
 * from this extract -- confirm against the full source. */
286 static struct attribute *visorchipset_install_attrs[] = {
287 &dev_attr_toolaction.attr,
288 &dev_attr_boottotool.attr,
289 &dev_attr_error.attr,
290 &dev_attr_textid.attr,
291 &dev_attr_remaining_steps.attr,
295 static struct attribute_group visorchipset_install_group = {
297 .attrs = visorchipset_install_attrs
/* guest-visible attributes */
300 static struct attribute *visorchipset_guest_attrs[] = {
301 &dev_attr_chipsetready.attr,
305 static struct attribute_group visorchipset_guest_group = {
307 .attrs = visorchipset_guest_attrs
/* parahotplug enable/disable attributes, in a named "parahotplug" subdir */
310 static struct attribute *visorchipset_parahotplug_attrs[] = {
311 &dev_attr_devicedisabled.attr,
312 &dev_attr_deviceenabled.attr,
316 static struct attribute_group visorchipset_parahotplug_group = {
317 .name = "parahotplug",
318 .attrs = visorchipset_parahotplug_attrs
/* all groups registered together via .dev.groups on the platform device */
321 static const struct attribute_group *visorchipset_dev_groups[] = {
322 &visorchipset_install_group,
323 &visorchipset_guest_group,
324 &visorchipset_parahotplug_group,
328 /* /sys/devices/platform/visorchipset */
/* Platform device that backs /sys/devices/platform/visorchipset and carries
 * the attribute groups defined above. */
329 static struct platform_device Visorchipset_platform_device = {
330 .name = "visorchipset",
332 .dev.groups = visorchipset_dev_groups,
335 /* Function prototypes */
336 static void controlvm_respond(struct controlvm_message_header *msgHdr,
338 static void controlvm_respond_chipset_init(
339 struct controlvm_message_header *msgHdr, int response,
340 enum ultra_chipset_feature features);
341 static void controlvm_respond_physdev_changestate(
342 struct controlvm_message_header *msgHdr, int response,
343 struct spar_segment_state state);
/* sysfs 'toolaction' read: returns the one-byte tool_action field read from
 * the controlvm channel.
 * NOTE(review): local declarations appear elided from this extract (original
 * lines 347-350) -- confirm against the full source. */
345 static ssize_t toolaction_show(struct device *dev,
346 struct device_attribute *attr,
351 visorchannel_read(controlvm_channel,
352 offsetof(struct spar_controlvm_channel_protocol,
353 tool_action), &toolAction, sizeof(u8));
354 return scnprintf(buf, PAGE_SIZE, "%u\n", toolAction);
/* sysfs 'toolaction' write: parses a base-10 u8 from the user buffer and
 * writes it to the channel's tool_action field.
 * NOTE(review): error-return and success-return lines appear elided from this
 * extract -- confirm against the full source. */
357 static ssize_t toolaction_store(struct device *dev,
358 struct device_attribute *attr,
359 const char *buf, size_t count)
364 if (kstrtou8(buf, 10, &toolAction) != 0)
367 ret = visorchannel_write(controlvm_channel,
368 offsetof(struct spar_controlvm_channel_protocol, tool_action),
369 &toolAction, sizeof(u8));
/* sysfs 'boottotool' read: reads the efi_spar_ind structure from the channel
 * and reports its boot_to_tool flag. */
376 static ssize_t boottotool_show(struct device *dev,
377 struct device_attribute *attr,
380 struct efi_spar_indication efiSparIndication;
382 visorchannel_read(controlvm_channel,
383 offsetof(struct spar_controlvm_channel_protocol,
384 efi_spar_ind), &efiSparIndication,
385 sizeof(struct efi_spar_indication));
386 return scnprintf(buf, PAGE_SIZE, "%u\n",
387 efiSparIndication.boot_to_tool);
/* sysfs 'boottotool' write: parses an int, stores it in boot_to_tool, and
 * writes the whole efi_spar_indication structure back to the channel.
 * NOTE(review): efiSparIndication is only partially initialized here before
 * being written; the elided lines (orig 396-399) may read it first -- verify
 * against the full source. */
390 static ssize_t boottotool_store(struct device *dev,
391 struct device_attribute *attr,
392 const char *buf, size_t count)
395 struct efi_spar_indication efiSparIndication;
397 if (kstrtoint(buf, 10, &val) != 0)
400 efiSparIndication.boot_to_tool = val;
401 ret = visorchannel_write(controlvm_channel,
402 offsetof(struct spar_controlvm_channel_protocol,
404 &(efiSparIndication),
405 sizeof(struct efi_spar_indication));
/* sysfs 'error' read: reports the u32 installation_error field from the
 * controlvm channel. */
412 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
417 visorchannel_read(controlvm_channel, offsetof(
418 struct spar_controlvm_channel_protocol, installation_error),
419 &error, sizeof(u32));
420 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
/* sysfs 'error' write: parses a base-10 u32 and stores it in the channel's
 * installation_error field. */
423 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
424 const char *buf, size_t count)
429 if (kstrtou32(buf, 10, &error) != 0)
432 ret = visorchannel_write(controlvm_channel,
433 offsetof(struct spar_controlvm_channel_protocol,
435 &error, sizeof(u32));
/* sysfs 'textid' read: reports the u32 installation_text_id field from the
 * controlvm channel. */
441 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
446 visorchannel_read(controlvm_channel, offsetof(
447 struct spar_controlvm_channel_protocol, installation_text_id),
448 &textId, sizeof(u32));
449 return scnprintf(buf, PAGE_SIZE, "%i\n", textId);
/* sysfs 'textid' write: parses a base-10 u32 and stores it in the channel's
 * installation_text_id field. */
452 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
453 const char *buf, size_t count)
458 if (kstrtou32(buf, 10, &textId) != 0)
461 ret = visorchannel_write(controlvm_channel,
462 offsetof(struct spar_controlvm_channel_protocol,
463 installation_text_id),
464 &textId, sizeof(u32));
/* sysfs 'remaining_steps' read: reports the u16
 * installation_remaining_steps field from the controlvm channel. */
470 static ssize_t remaining_steps_show(struct device *dev,
471 struct device_attribute *attr, char *buf)
475 visorchannel_read(controlvm_channel,
476 offsetof(struct spar_controlvm_channel_protocol,
477 installation_remaining_steps),
480 return scnprintf(buf, PAGE_SIZE, "%hu\n", remainingSteps);
/* sysfs 'remaining_steps' write: parses a base-10 u16 and stores it in the
 * channel's installation_remaining_steps field. */
483 static ssize_t remaining_steps_store(struct device *dev,
484 struct device_attribute *attr, const char *buf, size_t count)
489 if (kstrtou16(buf, 10, &remainingSteps) != 0)
492 ret = visorchannel_write(controlvm_channel,
493 offsetof(struct spar_controlvm_channel_protocol,
494 installation_remaining_steps),
495 &remainingSteps, sizeof(u16));
/* NOTE(review): the enclosing function header is elided from this extract
 * (original lines before 505). This appears to be a UTF-8 round-trip
 * self-test: converts a wide string to UTF-8 and back, re-terminating each
 * result manually, then compares against the original. */
505 wchar_t unicodeString[] = { 'a', 'b', 'c', 0 };
506 char s[sizeof(unicodeString) * NLS_MAX_CHARSET_SIZE];
507 wchar_t unicode2[99];
509 /* NOTE: the kernel's utf8_mbstowcs() and utf8_wcstombs() do NOT copy
510 * the trailing NUL, so both conversion results below are terminated
511 * by hand. */
514 LOGINF("sizeof(wchar_t) = %d", sizeof(wchar_t));
515 LOGINF("utf8_wcstombs=%d",
516 chrs = utf8_wcstombs(s, unicodeString, sizeof(s)));
518 s[chrs] = '\0'; /* terminate by hand; see NOTE above */
520 LOGINF("utf8_mbstowcs=%d", chrs = utf8_mbstowcs(unicode2, s, 100));
522 unicode2[chrs] = 0; /* terminate by hand; see NOTE above */
523 if (memcmp(unicodeString, unicode2, sizeof(unicodeString)) == 0)
524 LOGINF("strings match... good");
526 LOGINF("strings did not match!!");
/* Release everything owned by a visorchipset_bus_info (proc object,
 * description string, and -- in elided lines -- presumably the name/channel
 * resources), then zero the whole structure for reuse. */
531 busInfo_clear(void *v)
533 struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);
535 if (p->proc_object) {
536 visor_proc_DestroyObject(p->proc_object);
537 p->proc_object = NULL;
542 kfree(p->description);
543 p->description = NULL;
545 p->state.created = 0;
546 memset(p, 0, sizeof(struct visorchipset_bus_info));
/* Mark a visorchipset_device_info as not-created and zero the structure.
 * (The created=0 store is immediately subsumed by the memset; kept as-is.) */
550 devInfo_clear(void *v)
552 struct visorchipset_device_info *p =
553 (struct visorchipset_device_info *)(v);
555 p->state.created = 0;
556 memset(p, 0, sizeof(struct visorchipset_device_info));
/* AND together all chipset_events flags to decide whether every expected
 * event has arrived (and therefore whether a response should be sent).
 * NOTE(review): declarations of i/send_msg and the return are elided from
 * this extract -- confirm against the full source. */
560 check_chipset_events(void)
564 /* Check events to determine if response should be sent */
565 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
566 send_msg &= chipset_events[i];
/* Reset all entries of the chipset_events[] flag array to 0. */
571 clear_chipset_events(void)
574 /* Clear chipset_events */
575 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
576 chipset_events[i] = 0;
/* Register (notifiers != NULL) or unregister (notifiers == NULL) the
 * server-side bus/device notifier callbacks, hand back the shared responder
 * table, and describe this driver in *driver_info.  Serialized against the
 * client variant via notifier_lock.
 * FIX(review): repaired HTML-entity mojibake "down(¬ifier_lock)" back to
 * down(&notifier_lock).
 * NOTE(review): the matching up(&notifier_lock) and some guard lines appear
 * elided from this extract -- confirm against the full source. */
580 visorchipset_register_busdev_server(
581 struct visorchipset_busdev_notifiers *notifiers,
582 struct visorchipset_busdev_responders *responders,
583 struct ultra_vbus_deviceinfo *driver_info)
585 down(&notifier_lock);
586 if (notifiers == NULL) {
587 memset(&BusDev_Server_Notifiers, 0,
588 sizeof(BusDev_Server_Notifiers));
589 serverregistered = 0; /* clear flag */
591 BusDev_Server_Notifiers = *notifiers;
592 serverregistered = 1; /* set flag */
595 *responders = BusDev_Responders;
597 bus_device_info_init(driver_info, "chipset", "visorchipset",
602 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
/* Register (notifiers != NULL) or unregister (notifiers == NULL) the
 * client-side bus/device notifier callbacks, hand back the shared responder
 * table, and describe this driver in *driver_info.  Serialized against the
 * server variant via notifier_lock.
 * FIX(review): repaired HTML-entity mojibake "down(¬ifier_lock)" back to
 * down(&notifier_lock).
 * NOTE(review): the matching up(&notifier_lock) appears elided from this
 * extract -- confirm against the full source. */
605 visorchipset_register_busdev_client(
606 struct visorchipset_busdev_notifiers *notifiers,
607 struct visorchipset_busdev_responders *responders,
608 struct ultra_vbus_deviceinfo *driver_info)
610 down(&notifier_lock);
611 if (notifiers == NULL) {
612 memset(&BusDev_Client_Notifiers, 0,
613 sizeof(BusDev_Client_Notifiers));
614 clientregistered = 0; /* clear flag */
616 BusDev_Client_Notifiers = *notifiers;
617 clientregistered = 1; /* set flag */
620 *responders = BusDev_Responders;
622 bus_device_info_init(driver_info, "chipset(bolts)",
623 "visorchipset", VERSION, NULL);
626 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
/* Tear down all tracked buses and devices: walk both lists with the _safe
 * iterator (entries are removed while iterating) and unlink each entry.
 * NOTE(review): the per-entry cleanup/free calls appear elided from this
 * extract (original lines 635, 637-638, 641, 643-644). */
629 cleanup_controlvm_structures(void)
631 struct visorchipset_bus_info *bi, *tmp_bi;
632 struct visorchipset_device_info *di, *tmp_di;
634 list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
636 list_del(&bi->entry);
640 list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
642 list_del(&di->entry);
/* Handle CONTROLVM_CHIPSET_INIT: guard against double init, negotiate the
 * parahotplug feature bit with Command, set the features-aware REPLY bit,
 * reset internal bus/device bookkeeping, and respond if a response was
 * requested.  rc uses the driver's negative-CONTROLVM_RESP error style. */
648 chipset_init(struct controlvm_message *inmsg)
650 static int chipset_inited;
651 enum ultra_chipset_feature features = 0;
652 int rc = CONTROLVM_RESP_SUCCESS;
654 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
655 if (chipset_inited) {
656 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
660 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
662 /* Set features to indicate we support parahotplug (if Command
663 * also supports it). */
665 inmsg->cmd.init_chipset.
666 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
668 /* Set the "reply" bit so Command knows this is a
669 * features-aware driver. */
670 features |= ULTRA_CHIPSET_FEATURE_REPLY;
674 cleanup_controlvm_structures();
675 if (inmsg->hdr.flags.response_expected)
676 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
/* Build a response message from the request header: copy the header, zero
 * the payload fields, and on error record failure with the positive
 * completion status (responses are passed in as negative error codes).
 * NOTE(review): the "if (response < 0)" guard before the failed=1 line
 * appears elided from this extract (original line 688). */
680 controlvm_init_response(struct controlvm_message *msg,
681 struct controlvm_message_header *msgHdr, int response)
683 memset(msg, 0, sizeof(struct controlvm_message));
684 memcpy(&msg->hdr, msgHdr, sizeof(struct controlvm_message_header));
685 msg->hdr.payload_bytes = 0;
686 msg->hdr.payload_vm_offset = 0;
687 msg->hdr.payload_max_bytes = 0;
689 msg->hdr.flags.failed = 1;
690 msg->hdr.completion_status = (u32) (-response);
/* Send a generic controlvm response for msgHdr.  For DEVICE_CHANGESTATE on
 * the diagpool bus/device, echo back the saved device_change_state packet.
 * The response is queued on CONTROLVM_QUEUE_REQUEST via signalinsert. */
695 controlvm_respond(struct controlvm_message_header *msgHdr, int response)
697 struct controlvm_message outmsg;
699 controlvm_init_response(&outmsg, msgHdr, response);
700 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
701 * back the deviceChangeState structure in the packet. */
702 if (msgHdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
703 g_devicechangestate_packet.device_change_state.bus_no ==
705 g_devicechangestate_packet.device_change_state.dev_no ==
707 outmsg.cmd = g_devicechangestate_packet;
708 if (outmsg.hdr.flags.test_message == 1)
711 if (!visorchannel_signalinsert(controlvm_channel,
712 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
/* Respond to CHIPSET_INIT, returning the negotiated feature bits to Command
 * in the init_chipset packet. */
718 controlvm_respond_chipset_init(struct controlvm_message_header *msgHdr,
720 enum ultra_chipset_feature features)
722 struct controlvm_message outmsg;
724 controlvm_init_response(&outmsg, msgHdr, response);
725 outmsg.cmd.init_chipset.features = features;
726 if (!visorchannel_signalinsert(controlvm_channel,
727 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
/* Respond to a physical-device CHANGESTATE request, echoing the resulting
 * segment state and marking the packet as referring to a physical device. */
732 static void controlvm_respond_physdev_changestate(
733 struct controlvm_message_header *msgHdr, int response,
734 struct spar_segment_state state)
736 struct controlvm_message outmsg;
738 controlvm_init_response(&outmsg, msgHdr, response);
739 outmsg.cmd.device_change_state.state = state;
740 outmsg.cmd.device_change_state.flags.phys_device = 1;
741 if (!visorchannel_signalinsert(controlvm_channel,
742 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
/* Stash a controlvm message into the channel's saved-crash-message area so
 * it can be replayed after a guest crash.  Validates the saved-message count
 * against CONTROLVM_CRASHMSG_MAX, reads the save-area offset, then writes
 * the message to slot 0 (CRASH_BUS) or slot 1 (the device slot, one
 * controlvm_message past the bus slot).  Failures are reported via POSTCODE
 * only; early-return lines appear elided from this extract. */
748 visorchipset_save_message(struct controlvm_message *msg,
749 enum crash_obj_type type)
751 u32 localSavedCrashMsgOffset;
752 u16 localSavedCrashMsgCount;
754 /* get saved message count */
755 if (visorchannel_read(controlvm_channel,
756 offsetof(struct spar_controlvm_channel_protocol,
757 saved_crash_message_count),
758 &localSavedCrashMsgCount, sizeof(u16)) < 0) {
759 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
760 POSTCODE_SEVERITY_ERR);
764 if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
765 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
766 localSavedCrashMsgCount,
767 POSTCODE_SEVERITY_ERR);
771 /* get saved crash message offset */
772 if (visorchannel_read(controlvm_channel,
773 offsetof(struct spar_controlvm_channel_protocol,
774 saved_crash_message_offset),
775 &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
776 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
777 POSTCODE_SEVERITY_ERR);
781 if (type == CRASH_BUS) {
782 if (visorchannel_write(controlvm_channel,
783 localSavedCrashMsgOffset,
785 sizeof(struct controlvm_message)) < 0) {
786 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
787 POSTCODE_SEVERITY_ERR);
791 if (visorchannel_write(controlvm_channel,
792 localSavedCrashMsgOffset +
793 sizeof(struct controlvm_message), msg,
794 sizeof(struct controlvm_message)) < 0) {
795 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
796 POSTCODE_SEVERITY_ERR);
801 EXPORT_SYMBOL_GPL(visorchipset_save_message);
/* Complete a pending bus-level controlvm command: update the tracked bus's
 * created state, send the controlvm response if one is pending and matches
 * cmdId, and on failed BUS_CREATE (or on destroy, per need_clear) undo the
 * device rows for that bus. */
804 bus_responder(enum controlvm_id cmdId, ulong busNo, int response)
806 struct visorchipset_bus_info *p = NULL;
807 BOOL need_clear = FALSE;
809 p = findbus(&bus_info_list, busNo);
814 if ((cmdId == CONTROLVM_BUS_CREATE) &&
815 (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
816 /* undo the row we just created... */
817 delbusdevices(&dev_info_list, busNo);
819 if (cmdId == CONTROLVM_BUS_CREATE)
820 p->state.created = 1;
821 if (cmdId == CONTROLVM_BUS_DESTROY)
825 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
826 return; /* no controlvm response needed */
827 if (p->pending_msg_hdr.id != (u32) cmdId)
829 controlvm_respond(&p->pending_msg_hdr, response);
830 p->pending_msg_hdr.id = CONTROLVM_INVALID;
833 delbusdevices(&dev_info_list, busNo);
/* Complete a pending DEVICE_CHANGESTATE command for (busNo, devNo): build a
 * response carrying the resulting segment state, queue it on the request
 * queue, and clear the device's pending message id. */
838 device_changestate_responder(enum controlvm_id cmdId,
839 ulong busNo, ulong devNo, int response,
840 struct spar_segment_state responseState)
842 struct visorchipset_device_info *p = NULL;
843 struct controlvm_message outmsg;
845 p = finddevice(&dev_info_list, busNo, devNo);
848 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
849 return; /* no controlvm response needed */
850 if (p->pending_msg_hdr.id != cmdId)
853 controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
855 outmsg.cmd.device_change_state.bus_no = busNo;
856 outmsg.cmd.device_change_state.dev_no = devNo;
857 outmsg.cmd.device_change_state.state = responseState;
859 if (!visorchannel_signalinsert(controlvm_channel,
860 CONTROLVM_QUEUE_REQUEST, &outmsg))
863 p->pending_msg_hdr.id = CONTROLVM_INVALID;
/* Complete a pending device-level controlvm command: update the tracked
 * device's created state and, if a matching response is pending, send it
 * and clear the pending message id. */
867 device_responder(enum controlvm_id cmdId, ulong busNo, ulong devNo,
870 struct visorchipset_device_info *p = NULL;
871 BOOL need_clear = FALSE;
873 p = finddevice(&dev_info_list, busNo, devNo);
877 if (cmdId == CONTROLVM_DEVICE_CREATE)
878 p->state.created = 1;
879 if (cmdId == CONTROLVM_DEVICE_DESTROY)
883 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
884 return; /* no controlvm response needed */
886 if (p->pending_msg_hdr.id != (u32) cmdId)
889 controlvm_respond(&p->pending_msg_hdr, response);
890 p->pending_msg_hdr.id = CONTROLVM_INVALID;
/* Finish a bus command: record the pending response header (or mark it
 * invalid when no response is expected), and on success dispatch the
 * bus_create/bus_destroy notifier callbacks -- server notifiers always,
 * client notifiers only for client buses.  The callback is then responsible
 * for invoking the responder; otherwise bus_responder() is called directly.
 * Runs under notifier_lock.
 * FIX(review): repaired HTML-entity mojibake "down(¬ifier_lock)" back to
 * down(&notifier_lock).
 * NOTE(review): the matching up(&notifier_lock), switch(cmd) line, and
 * notified=TRUE assignments appear elided from this extract. */
896 bus_epilog(u32 busNo,
897 u32 cmd, struct controlvm_message_header *msgHdr,
898 int response, BOOL needResponse)
900 BOOL notified = FALSE;
902 struct visorchipset_bus_info *pBusInfo = findbus(&bus_info_list, busNo);
908 memcpy(&pBusInfo->pending_msg_hdr, msgHdr,
909 sizeof(struct controlvm_message_header));
911 pBusInfo->pending_msg_hdr.id = CONTROLVM_INVALID;
913 down(&notifier_lock);
914 if (response == CONTROLVM_RESP_SUCCESS) {
916 case CONTROLVM_BUS_CREATE:
917 /* We can't tell from the bus_create
918 * information which of our 2 bus flavors the
919 * devices on this bus will ultimately end up.
920 * FORTUNATELY, it turns out it is harmless to
921 * send the bus_create to both of them. We can
922 * narrow things down a little bit, though,
923 * because we know: - BusDev_Server can handle
924 * either server or client devices
925 * - BusDev_Client can handle ONLY client
927 if (BusDev_Server_Notifiers.bus_create) {
928 (*BusDev_Server_Notifiers.bus_create) (busNo);
931 if ((!pBusInfo->flags.server) /*client */ &&
932 BusDev_Client_Notifiers.bus_create) {
933 (*BusDev_Client_Notifiers.bus_create) (busNo);
937 case CONTROLVM_BUS_DESTROY:
938 if (BusDev_Server_Notifiers.bus_destroy) {
939 (*BusDev_Server_Notifiers.bus_destroy) (busNo);
942 if ((!pBusInfo->flags.server) /*client */ &&
943 BusDev_Client_Notifiers.bus_destroy) {
944 (*BusDev_Client_Notifiers.bus_destroy) (busNo);
951 /* The callback function just called above is responsible
952 * for calling the appropriate visorchipset_busdev_responders
953 * function, which will call bus_responder()
957 bus_responder(cmd, busNo, response);
/* Finish a device command: record the pending response header, pick the
 * server or client notifier set (per for_visorbus), and on success dispatch
 * device_create/device_destroy, or map a CHANGESTATE segment state onto
 * resume (running), pause (standby), or -- for a lite pause of the diagpool
 * device -- a KOBJ_ONLINE uevent that triggers the diag_shutdown.sh hotplug
 * script.  The callback is then responsible for invoking the responder;
 * otherwise device_responder() is called directly.  Runs under
 * notifier_lock.
 * FIX(review): repaired HTML-entity mojibake "down(¬ifier_lock)" back to
 * down(&notifier_lock).
 * NOTE(review): the matching up(&notifier_lock), switch(cmd) line,
 * notified=TRUE assignments, and the kobject_uevent_env call line appear
 * elided from this extract. */
962 device_epilog(u32 busNo, u32 devNo, struct spar_segment_state state, u32 cmd,
963 struct controlvm_message_header *msgHdr, int response,
964 BOOL needResponse, BOOL for_visorbus)
966 struct visorchipset_busdev_notifiers *notifiers = NULL;
967 BOOL notified = FALSE;
969 struct visorchipset_device_info *pDevInfo =
970 finddevice(&dev_info_list, busNo, devNo);
972 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
980 notifiers = &BusDev_Server_Notifiers;
982 notifiers = &BusDev_Client_Notifiers;
984 memcpy(&pDevInfo->pending_msg_hdr, msgHdr,
985 sizeof(struct controlvm_message_header));
987 pDevInfo->pending_msg_hdr.id = CONTROLVM_INVALID;
989 down(&notifier_lock);
992 case CONTROLVM_DEVICE_CREATE:
993 if (notifiers->device_create) {
994 (*notifiers->device_create) (busNo, devNo);
998 case CONTROLVM_DEVICE_CHANGESTATE:
999 /* ServerReady / ServerRunning / SegmentStateRunning */
1000 if (state.alive == segment_state_running.alive &&
1002 segment_state_running.operating) {
1003 if (notifiers->device_resume) {
1004 (*notifiers->device_resume) (busNo,
1009 /* ServerNotReady / ServerLost / SegmentStateStandby */
1010 else if (state.alive == segment_state_standby.alive &&
1012 segment_state_standby.operating) {
1013 /* technically this is standby case
1014 * where server is lost
1016 if (notifiers->device_pause) {
1017 (*notifiers->device_pause) (busNo,
1021 } else if (state.alive == segment_state_paused.alive &&
1023 segment_state_paused.operating) {
1024 /* this is lite pause where channel is
1025 * still valid just 'pause' of it
1027 if (busNo == g_diagpool_bus_no &&
1028 devNo == g_diagpool_dev_no) {
1029 /* this will trigger the
1030 * diag_shutdown.sh script in
1031 * the visorchipset hotplug */
1033 (&Visorchipset_platform_device.dev.
1034 kobj, KOBJ_ONLINE, envp);
1038 case CONTROLVM_DEVICE_DESTROY:
1039 if (notifiers->device_destroy) {
1040 (*notifiers->device_destroy) (busNo, devNo);
1047 /* The callback function just called above is responsible
1048 * for calling the appropriate visorchipset_busdev_responders
1049 * function, which will call device_responder()
1053 device_responder(cmd, busNo, devNo, response);
/* Handle CONTROLVM_BUS_CREATE: reject duplicates, allocate and populate a
 * visorchipset_bus_info (bus/dev numbers, address type per test_message,
 * server flag, channel address/size/UUIDs), add it to bus_info_list, and
 * finish via bus_epilog().  rc uses the negative-CONTROLVM_RESP style. */
1058 bus_create(struct controlvm_message *inmsg)
1060 struct controlvm_message_packet *cmd = &inmsg->cmd;
1061 ulong busNo = cmd->create_bus.bus_no;
1062 int rc = CONTROLVM_RESP_SUCCESS;
1063 struct visorchipset_bus_info *pBusInfo = NULL;
1066 pBusInfo = findbus(&bus_info_list, busNo);
1067 if (pBusInfo && (pBusInfo->state.created == 1)) {
1068 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1069 POSTCODE_SEVERITY_ERR);
1070 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1073 pBusInfo = kzalloc(sizeof(struct visorchipset_bus_info), GFP_KERNEL);
1074 if (pBusInfo == NULL) {
1075 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1076 POSTCODE_SEVERITY_ERR);
1077 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1081 INIT_LIST_HEAD(&pBusInfo->entry);
1082 pBusInfo->bus_no = busNo;
1083 pBusInfo->dev_no = cmd->create_bus.dev_count;
1085 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1087 if (inmsg->hdr.flags.test_message == 1)
1088 pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1090 pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1092 pBusInfo->flags.server = inmsg->hdr.flags.server;
1093 pBusInfo->chan_info.channel_addr = cmd->create_bus.channel_addr;
1094 pBusInfo->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1095 pBusInfo->chan_info.channel_type_uuid =
1096 cmd->create_bus.bus_data_type_uuid;
1097 pBusInfo->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
1099 list_add(&pBusInfo->entry, &bus_info_list);
1101 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1104 bus_epilog(busNo, CONTROLVM_BUS_CREATE, &inmsg->hdr,
1105 rc, inmsg->hdr.flags.response_expected == 1);
/* Handle CONTROLVM_BUS_DESTROY: validate the bus exists and was created,
 * then finish via bus_epilog() (which tears down its devices). */
1109 bus_destroy(struct controlvm_message *inmsg)
1111 struct controlvm_message_packet *cmd = &inmsg->cmd;
1112 ulong busNo = cmd->destroy_bus.bus_no;
1113 struct visorchipset_bus_info *pBusInfo;
1114 int rc = CONTROLVM_RESP_SUCCESS;
1116 pBusInfo = findbus(&bus_info_list, busNo);
1118 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1121 if (pBusInfo->state.created == 0) {
1122 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1127 bus_epilog(busNo, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
1128 rc, inmsg->hdr.flags.response_expected == 1);
/* Handle CONTROLVM_BUS_CONFIGURE: validate the bus exists, was created, and
 * has no response still pending, then record the guest partition handle,
 * partition UUID, and name parsed from the message payload, finishing via
 * bus_epilog(). */
1132 bus_configure(struct controlvm_message *inmsg,
1133 struct parser_context *parser_ctx)
1135 struct controlvm_message_packet *cmd = &inmsg->cmd;
1136 ulong busNo = cmd->configure_bus.bus_no;
1137 struct visorchipset_bus_info *pBusInfo = NULL;
1138 int rc = CONTROLVM_RESP_SUCCESS;
1141 busNo = cmd->configure_bus.bus_no;
1142 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1144 pBusInfo = findbus(&bus_info_list, busNo);
1146 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1147 POSTCODE_SEVERITY_ERR);
1148 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1151 if (pBusInfo->state.created == 0) {
1152 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1153 POSTCODE_SEVERITY_ERR);
1154 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1157 /* TBD - add this check to other commands also... */
1158 if (pBusInfo->pending_msg_hdr.id != CONTROLVM_INVALID) {
1159 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1160 POSTCODE_SEVERITY_ERR);
1161 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1165 pBusInfo->partition_handle = cmd->configure_bus.guest_handle;
1166 pBusInfo->partition_uuid = parser_id_get(parser_ctx);
1167 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1168 pBusInfo->name = parser_string_get(parser_ctx);
1170 visorchannel_uuid_id(&pBusInfo->partition_uuid, s);
1171 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1173 bus_epilog(busNo, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
1174 rc, inmsg->hdr.flags.response_expected == 1);
/* Handle a CONTROLVM_DEVICE_CREATE message: validate the (bus, dev)
 * pair, allocate and populate a visorchipset_device_info entry with
 * the device's channel parameters, link it into dev_info_list, and
 * respond via device_epilog().
 */
1178 my_device_create(struct controlvm_message *inmsg)
1180 struct controlvm_message_packet *cmd = &inmsg->cmd;
1181 ulong busNo = cmd->create_device.bus_no;
1182 ulong devNo = cmd->create_device.dev_no;
1183 struct visorchipset_device_info *pDevInfo = NULL;
1184 struct visorchipset_bus_info *pBusInfo = NULL;
1185 int rc = CONTROLVM_RESP_SUCCESS;
/* Reject a create for a device that already exists and was created. */
1187 pDevInfo = finddevice(&dev_info_list, busNo, devNo);
1188 if (pDevInfo && (pDevInfo->state.created == 1)) {
1189 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1190 POSTCODE_SEVERITY_ERR);
1191 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
/* The parent bus must exist and have completed creation. */
1194 pBusInfo = findbus(&bus_info_list, busNo);
1196 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1197 POSTCODE_SEVERITY_ERR);
1198 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1201 if (pBusInfo->state.created == 0) {
1202 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1203 POSTCODE_SEVERITY_ERR);
1204 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1207 pDevInfo = kzalloc(sizeof(struct visorchipset_device_info), GFP_KERNEL);
1208 if (pDevInfo == NULL) {
1209 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1210 POSTCODE_SEVERITY_ERR);
1211 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1215 INIT_LIST_HEAD(&pDevInfo->entry);
1216 pDevInfo->bus_no = busNo;
1217 pDevInfo->dev_no = devNo;
1218 pDevInfo->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1219 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
1220 POSTCODE_SEVERITY_INFO);
/* Test messages carry addresses in local OS-controlled memory. */
1222 if (inmsg->hdr.flags.test_message == 1)
1223 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1225 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1226 pDevInfo->chan_info.channel_addr = cmd->create_device.channel_addr;
1227 pDevInfo->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1228 pDevInfo->chan_info.channel_type_uuid =
1229 cmd->create_device.data_type_uuid;
1230 pDevInfo->chan_info.intr = cmd->create_device.intr;
1231 list_add(&pDevInfo->entry, &dev_info_list);
1232 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, devNo, busNo,
1233 POSTCODE_SEVERITY_INFO);
1235 /* get the bus and devNo for DiagPool channel */
1237 is_diagpool_channel(pDevInfo->chan_info.channel_type_uuid)) {
1238 g_diagpool_bus_no = busNo;
1239 g_diagpool_dev_no = devNo;
1241 device_epilog(busNo, devNo, segment_state_running,
1242 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
1243 inmsg->hdr.flags.response_expected == 1,
1244 FOR_VISORBUS(pDevInfo->chan_info.channel_type_uuid));
/* Handle a CONTROLVM_DEVICE_CHANGESTATE message for a virtual device:
 * validate the device, then forward the requested segment state to
 * device_epilog().
 */
1248 my_device_changestate(struct controlvm_message *inmsg)
1250 struct controlvm_message_packet *cmd = &inmsg->cmd;
1251 ulong busNo = cmd->device_change_state.bus_no;
1252 ulong devNo = cmd->device_change_state.dev_no;
1253 struct spar_segment_state state = cmd->device_change_state.state;
1254 struct visorchipset_device_info *pDevInfo = NULL;
1255 int rc = CONTROLVM_RESP_SUCCESS;
1257 pDevInfo = finddevice(&dev_info_list, busNo, devNo);
1259 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1260 POSTCODE_SEVERITY_ERR);
1261 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
/* Cannot change the state of a device that was never created. */
1264 if (pDevInfo->state.created == 0) {
1265 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1266 POSTCODE_SEVERITY_ERR);
1267 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
/* Only respond through device_epilog when validation succeeded. */
1270 if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1271 device_epilog(busNo, devNo, state, CONTROLVM_DEVICE_CHANGESTATE,
1273 inmsg->hdr.flags.response_expected == 1,
1275 pDevInfo->chan_info.channel_type_uuid));
/* Handle a CONTROLVM_DEVICE_DESTROY message: validate the device and
 * respond via device_epilog() with segment_state_running.
 */
1279 my_device_destroy(struct controlvm_message *inmsg)
1281 struct controlvm_message_packet *cmd = &inmsg->cmd;
1282 ulong busNo = cmd->destroy_device.bus_no;
1283 ulong devNo = cmd->destroy_device.dev_no;
1284 struct visorchipset_device_info *pDevInfo = NULL;
1285 int rc = CONTROLVM_RESP_SUCCESS;
1287 pDevInfo = finddevice(&dev_info_list, busNo, devNo);
1289 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
/* Destroying a never-created device is reported as already-done. */
1292 if (pDevInfo->state.created == 0) {
1293 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1297 if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1298 device_epilog(busNo, devNo, segment_state_running,
1299 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
1300 inmsg->hdr.flags.response_expected == 1,
1302 pDevInfo->chan_info.channel_type_uuid));
1305 /* When provided with the physical address of the controlvm channel
1306 * (phys_addr), the offset to the payload area we need to manage
1307 * (offset), and the size of this payload area (bytes), fills in the
1308 * controlvm_payload_info struct. Returns TRUE for success or FALSE
1312 initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
1313 struct controlvm_payload_info *info)
1315 u8 __iomem *payload = NULL;
1316 int rc = CONTROLVM_RESP_SUCCESS;
/* A NULL info pointer means there is nowhere to store the mapping. */
1319 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1322 memset(info, 0, sizeof(struct controlvm_payload_info));
1323 if ((offset == 0) || (bytes == 0)) {
1324 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
/* Map the payload region into kernel virtual address space. */
1327 payload = ioremap_cache(phys_addr + offset, bytes);
1328 if (payload == NULL) {
1329 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1333 info->offset = offset;
1334 info->bytes = bytes;
1335 info->ptr = payload;
/* On the failure path, an already-established mapping is undone. */
1339 if (payload != NULL) {
/* Tear down a controlvm payload mapping created by
 * initialize_controlvm_payload_info() and clear the info struct.
 */
1348 destroy_controlvm_payload_info(struct controlvm_payload_info *info)
1350 if (info->ptr != NULL) {
1354 memset(info, 0, sizeof(struct controlvm_payload_info));
/* Read the request payload offset and size out of the controlvm
 * channel header, then map that region via
 * initialize_controlvm_payload_info().
 */
1358 initialize_controlvm_payload(void)
1360 HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
1361 u64 payloadOffset = 0;
1362 u32 payloadBytes = 0;
1364 if (visorchannel_read(controlvm_channel,
1365 offsetof(struct spar_controlvm_channel_protocol,
1366 request_payload_offset),
1367 &payloadOffset, sizeof(payloadOffset)) < 0) {
1368 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1369 POSTCODE_SEVERITY_ERR);
1372 if (visorchannel_read(controlvm_channel,
1373 offsetof(struct spar_controlvm_channel_protocol,
1374 request_payload_bytes),
1375 &payloadBytes, sizeof(payloadBytes)) < 0) {
1376 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1377 POSTCODE_SEVERITY_ERR);
1380 initialize_controlvm_payload_info(phys_addr,
1381 payloadOffset, payloadBytes,
1382 &controlvm_payload_info);
1385 /* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1386 * Returns CONTROLVM_RESP_xxx code.
1389 visorchipset_chipset_ready(void)
1391 kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
1392 return CONTROLVM_RESP_SUCCESS;
1394 EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
/* Emit a KOBJ_CHANGE uevent with SPARSP_SELFTEST=1 to trigger the
 * chipset self-test from user space. Returns CONTROLVM_RESP_xxx code.
 */
1397 visorchipset_chipset_selftest(void)
1399 char env_selftest[20];
1400 char *envp[] = { env_selftest, NULL };
1402 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1403 kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1405 return CONTROLVM_RESP_SUCCESS;
1407 EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1409 /* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1410 * Returns CONTROLVM_RESP_xxx code.
1413 visorchipset_chipset_notready(void)
1415 kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1416 return CONTROLVM_RESP_SUCCESS;
1418 EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
/* Handle CONTROLVM_CHIPSET_READY: fire the online uevent and either
 * respond immediately or (when holdchipsetready is set) stash the
 * header so the response can be sent later, once user space reports
 * that modules are loaded and disks are mounted.
 */
1421 chipset_ready(struct controlvm_message_header *msgHdr)
1423 int rc = visorchipset_chipset_ready();
1425 if (rc != CONTROLVM_RESP_SUCCESS)
1427 if (msgHdr->flags.response_expected && !visorchipset_holdchipsetready)
1428 controlvm_respond(msgHdr, rc);
1429 if (msgHdr->flags.response_expected && visorchipset_holdchipsetready) {
1430 /* Send CHIPSET_READY response when all modules have been loaded
1431 * and disks mounted for the partition
1433 g_chipset_msg_hdr = *msgHdr;
/* Handle CONTROLVM_CHIPSET_SELFTEST: kick off the self-test uevent and
 * respond if a response is expected.
 */
1438 chipset_selftest(struct controlvm_message_header *msgHdr)
1440 int rc = visorchipset_chipset_selftest();
1442 if (rc != CONTROLVM_RESP_SUCCESS)
1444 if (msgHdr->flags.response_expected)
1445 controlvm_respond(msgHdr, rc);
/* Handle CONTROLVM_CHIPSET_STOP: fire the offline uevent and respond
 * if a response is expected.
 */
1449 chipset_notready(struct controlvm_message_header *msgHdr)
1451 int rc = visorchipset_chipset_notready();
1453 if (rc != CONTROLVM_RESP_SUCCESS)
1455 if (msgHdr->flags.response_expected)
1456 controlvm_respond(msgHdr, rc);
1459 /* This is your "one-stop" shop for grabbing the next message from the
1460 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1463 read_controlvm_event(struct controlvm_message *msg)
1465 if (visorchannel_signalremove(controlvm_channel,
1466 CONTROLVM_QUEUE_EVENT, msg)) {
/* NOTE(review): test messages appear to be filtered/handled here —
 * the branch body is not visible in this view; confirm before use. */
1468 if (msg->hdr.flags.test_message == 1)
1476 * The general parahotplug flow works as follows. The visorchipset
1477 * driver receives a DEVICE_CHANGESTATE message from Command
1478 * specifying a physical device to enable or disable. The CONTROLVM
1479 * message handler calls parahotplug_process_message, which then adds
1480 * the message to a global list and kicks off a udev event which
1481 * causes a user level script to enable or disable the specified
1482 * device. The udev script then writes to the parahotplug sysfs
1483 * interface (deviceenabled/devicedisabled), which causes
1484 * parahotplug_request_complete to get called, at which point the
1485 * appropriate CONTROLVM message is retrieved from the list and
1485 * responded to.
1488 #define PARAHOTPLUG_TIMEOUT_MS 2000
1491 * Generate unique int to match an outstanding CONTROLVM message with a
1492 * udev script /proc response
1495 parahotplug_next_id(void)
1497 static atomic_t id = ATOMIC_INIT(0);
/* atomic_inc_return makes this safe against concurrent callers. */
1499 return atomic_inc_return(&id);
1503 * Returns the time (in jiffies) when a CONTROLVM message on the list
1504 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1506 static unsigned long
1507 parahotplug_next_expiration(void)
1509 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1513 * Create a parahotplug_request, which is basically a wrapper for a
1514 * CONTROLVM_MESSAGE that we can stick on a list
1516 static struct parahotplug_request *
1517 parahotplug_request_create(struct controlvm_message *msg)
1519 struct parahotplug_request *req;
/* __GFP_NORETRY: allocation may fail instead of invoking drastic
 * reclaim; the caller must handle a NULL return. */
1521 req = kmalloc(sizeof(*req), GFP_KERNEL|__GFP_NORETRY);
1525 req->id = parahotplug_next_id();
1526 req->expiration = parahotplug_next_expiration();
1533 * Free a parahotplug_request.
1536 parahotplug_request_destroy(struct parahotplug_request *req)
1542 * Cause uevent to run the user level script to do the disable/enable
1543 * specified in (the CONTROLVM message in) the specified
1544 * parahotplug_request
1547 parahotplug_request_kickoff(struct parahotplug_request *req)
1549 struct controlvm_message_packet *cmd = &req->msg.cmd;
1550 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1553 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
/* Build the environment strings the udev script consumes. */
1556 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1557 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1558 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1559 cmd->device_change_state.state.active);
1560 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1561 cmd->device_change_state.bus_no);
/* dev_no encodes PCI device (upper bits) and function (low 3 bits). */
1562 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1563 cmd->device_change_state.dev_no >> 3);
1564 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1565 cmd->device_change_state.dev_no & 0x7);
1567 kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1572 * Remove any request from the list that's been on there too long and
1573 * respond with an error.
1576 parahotplug_process_list(void)
1578 struct list_head *pos = NULL;
1579 struct list_head *tmp = NULL;
1581 spin_lock(&Parahotplug_request_list_lock);
/* list_for_each_safe permits removal of entries while iterating. */
1583 list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1584 struct parahotplug_request *req =
1585 list_entry(pos, struct parahotplug_request, list);
1586 if (time_after_eq(jiffies, req->expiration)) {
/* Timed out: respond with a udev-timeout error if expected. */
1588 if (req->msg.hdr.flags.response_expected)
1589 controlvm_respond_physdev_changestate(
1591 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1592 req->msg.cmd.device_change_state.state);
1593 parahotplug_request_destroy(req);
1597 spin_unlock(&Parahotplug_request_list_lock);
1601 * Called from the /proc handler, which means the user script has
1602 * finished the enable/disable. Find the matching identifier, and
1603 * respond to the CONTROLVM message with success.
1606 parahotplug_request_complete(int id, u16 active)
1608 struct list_head *pos = NULL;
1609 struct list_head *tmp = NULL;
1611 spin_lock(&Parahotplug_request_list_lock);
1613 /* Look for a request matching "id". */
1614 list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1615 struct parahotplug_request *req =
1616 list_entry(pos, struct parahotplug_request, list);
1617 if (req->id == id) {
1618 /* Found a match. Remove it from the list and
/* The lock is dropped before responding; the entry has already
 * been taken off the list so no one else can reach it. */
1622 spin_unlock(&Parahotplug_request_list_lock);
1623 req->msg.cmd.device_change_state.state.active = active;
1624 if (req->msg.hdr.flags.response_expected)
1625 controlvm_respond_physdev_changestate(
1626 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1627 req->msg.cmd.device_change_state.state);
1628 parahotplug_request_destroy(req);
1633 spin_unlock(&Parahotplug_request_list_lock);
1638 * Enables or disables a PCI device by kicking off a udev script
1641 parahotplug_process_message(struct controlvm_message *inmsg)
1643 struct parahotplug_request *req;
1645 req = parahotplug_request_create(inmsg);
/* Enable: respond immediately; Disable: queue and wait for the
 * udev script to confirm via parahotplug_request_complete(). */
1650 if (inmsg->cmd.device_change_state.state.active) {
1651 /* For enable messages, just respond with success
1652 * right away. This is a bit of a hack, but there are
1653 * issues with the early enable messages we get (with
1654 * either the udev script not detecting that the device
1655 * is up, or not getting called at all). Fortunately
1656 * the messages that get lost don't matter anyway, as
1657 * devices are automatically enabled at
1660 parahotplug_request_kickoff(req);
1661 controlvm_respond_physdev_changestate(&inmsg->hdr,
1662 CONTROLVM_RESP_SUCCESS, inmsg->cmd.
1663 device_change_state.state);
1664 parahotplug_request_destroy(req);
1666 /* For disable messages, add the request to the
1667 * request list before kicking off the udev script. It
1668 * won't get responded to until the script has
1669 * indicated it's done.
1671 spin_lock(&Parahotplug_request_list_lock);
1672 list_add_tail(&(req->list), &Parahotplug_request_list);
1673 spin_unlock(&Parahotplug_request_list_lock);
1675 parahotplug_request_kickoff(req);
1679 /* Process a controlvm message.
1681 * FALSE - this function will return FALSE only in the case where the
1682 * controlvm message was NOT processed, but processing must be
1683 * retried before reading the next controlvm message; a
1684 * scenario where this can occur is when we need to throttle
1685 * the allocation of memory in which to copy out controlvm
1687 * TRUE - processing of the controlvm message completed,
1688 * either successfully or with an error.
1691 handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
1693 struct controlvm_message_packet *cmd = &inmsg.cmd;
1694 u64 parametersAddr = 0;
1695 u32 parametersBytes = 0;
1696 struct parser_context *parser_ctx = NULL;
1697 BOOL isLocalAddr = FALSE;
1698 struct controlvm_message ackmsg;
1700 /* create parsing context if necessary */
1701 isLocalAddr = (inmsg.hdr.flags.test_message == 1);
1702 if (channel_addr == 0)
1704 parametersAddr = channel_addr + inmsg.hdr.payload_vm_offset;
1705 parametersBytes = inmsg.hdr.payload_bytes;
1707 /* Parameter and channel addresses within test messages actually lie
1708 * within our OS-controlled memory. We need to know that, because it
1709 * makes a difference in how we compute the virtual address.
1711 if (parametersAddr != 0 && parametersBytes != 0) {
1715 parser_init_byte_stream(parametersAddr, parametersBytes,
1716 isLocalAddr, &retry);
/* Parser allocation failed but a retry may succeed later: report
 * FALSE so the caller re-attempts this same message (throttling). */
1717 if (!parser_ctx && retry)
/* ACK the message up-front so the sender can make progress. */
1722 controlvm_init_response(&ackmsg, &inmsg.hdr,
1723 CONTROLVM_RESP_SUCCESS);
1724 if (controlvm_channel)
1725 visorchannel_signalinsert(controlvm_channel,
1726 CONTROLVM_QUEUE_ACK,
/* Dispatch on message id to the appropriate handler. */
1729 switch (inmsg.hdr.id) {
1730 case CONTROLVM_CHIPSET_INIT:
1731 chipset_init(&inmsg);
1733 case CONTROLVM_BUS_CREATE:
1736 case CONTROLVM_BUS_DESTROY:
1737 bus_destroy(&inmsg);
1739 case CONTROLVM_BUS_CONFIGURE:
1740 bus_configure(&inmsg, parser_ctx);
1742 case CONTROLVM_DEVICE_CREATE:
1743 my_device_create(&inmsg);
1745 case CONTROLVM_DEVICE_CHANGESTATE:
/* Physical devices go through the parahotplug/udev path;
 * virtual devices are handled directly. */
1746 if (cmd->device_change_state.flags.phys_device) {
1747 parahotplug_process_message(&inmsg);
1749 /* save the hdr and cmd structures for later use */
1750 /* when sending back the response to Command */
1751 my_device_changestate(&inmsg);
1752 g_diag_msg_hdr = inmsg.hdr;
1753 g_devicechangestate_packet = inmsg.cmd;
1757 case CONTROLVM_DEVICE_DESTROY:
1758 my_device_destroy(&inmsg);
1760 case CONTROLVM_DEVICE_CONFIGURE:
1761 /* no op for now, just send a respond that we passed */
1762 if (inmsg.hdr.flags.response_expected)
1763 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1765 case CONTROLVM_CHIPSET_READY:
1766 chipset_ready(&inmsg.hdr);
1768 case CONTROLVM_CHIPSET_SELFTEST:
1769 chipset_selftest(&inmsg.hdr);
1771 case CONTROLVM_CHIPSET_STOP:
1772 chipset_notready(&inmsg.hdr);
/* Unknown id: answer with MESSAGE_ID_UNKNOWN if a response is due. */
1775 if (inmsg.hdr.flags.response_expected)
1776 controlvm_respond(&inmsg.hdr,
1777 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1781 if (parser_ctx != NULL) {
1782 parser_done(parser_ctx);
/* Obtain the controlvm channel's physical address via a hypervisor
 * vmcall; the failure path is not visible in this view.
 */
1788 static HOSTADDRESS controlvm_get_channel_address(void)
1793 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
/* Delayed-work handler that polls the controlvm channel: drains the
 * RESPONSE queue, processes EVENT-queue messages via handle_command()
 * (with throttling/retry support), expires stale parahotplug requests,
 * adjusts the polling rate based on recent activity, and re-queues
 * itself.
 */
1800 controlvm_periodic_work(struct work_struct *work)
1802 struct controlvm_message inmsg;
1803 BOOL gotACommand = FALSE;
1804 BOOL handle_command_failed = FALSE;
1805 static u64 Poll_Count;
1807 /* make sure visorbus server is registered for controlvm callbacks */
1808 if (visorchipset_serverregwait && !serverregistered)
1810 /* make sure visorclientbus server is registered for controlvm
1813 if (visorchipset_clientregwait && !clientregistered)
1817 if (Poll_Count >= 250)
1822 /* Check events to determine if response to CHIPSET_READY
1825 if (visorchipset_holdchipsetready &&
1826 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
/* All expected chipset events arrived: send the deferred
 * CHIPSET_READY response and clear the stashed header. */
1827 if (check_chipset_events() == 1) {
1828 controlvm_respond(&g_chipset_msg_hdr, 0);
1829 clear_chipset_events();
1830 memset(&g_chipset_msg_hdr, 0,
1831 sizeof(struct controlvm_message_header));
/* Drain (and discard) everything on the RESPONSE queue. */
1835 while (visorchannel_signalremove(controlvm_channel,
1836 CONTROLVM_QUEUE_RESPONSE,
1840 if (ControlVm_Pending_Msg_Valid) {
1841 /* we throttled processing of a prior
1842 * msg, so try to process it again
1843 * rather than reading a new one
1845 inmsg = ControlVm_Pending_Msg;
1846 ControlVm_Pending_Msg_Valid = FALSE;
1849 gotACommand = read_controlvm_event(&inmsg);
1852 handle_command_failed = FALSE;
1853 while (gotACommand && (!handle_command_failed)) {
1854 most_recent_message_jiffies = jiffies;
1855 if (handle_command(inmsg,
1856 visorchannel_get_physaddr
1857 (controlvm_channel)))
1858 gotACommand = read_controlvm_event(&inmsg);
1860 /* this is a scenario where throttling
1861 * is required, but probably NOT an
1862 * error...; we stash the current
1863 * controlvm msg so we will attempt to
1864 * reprocess it on our next loop
1866 handle_command_failed = TRUE;
1867 ControlVm_Pending_Msg = inmsg;
1868 ControlVm_Pending_Msg_Valid = TRUE;
1872 /* parahotplug_worker */
1873 parahotplug_process_list();
1877 if (time_after(jiffies,
1878 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
1879 /* it's been longer than MIN_IDLE_SECONDS since we
1880 * processed our last controlvm message; slow down the
1883 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1884 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1886 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1887 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
/* Re-arm ourselves at the (possibly adjusted) polling interval. */
1890 queue_delayed_work(periodic_controlvm_workqueue,
1891 &periodic_controlvm_work, poll_jiffies);
/* Work handler used when booting inside a crash (kdump) kernel:
 * replays the saved CHIPSET_INIT / BUS_CREATE / DEVICE_CREATE messages
 * stored in the controlvm channel so the crash-dump storage device
 * comes back up.
 */
1895 setup_crash_devices_work_queue(struct work_struct *work)
1897 struct controlvm_message localCrashCreateBusMsg;
1898 struct controlvm_message localCrashCreateDevMsg;
1899 struct controlvm_message msg;
1900 u32 localSavedCrashMsgOffset;
1901 u16 localSavedCrashMsgCount;
1903 /* make sure visorbus server is registered for controlvm callbacks */
1904 if (visorchipset_serverregwait && !serverregistered)
1907 /* make sure visorclientbus server is registered for controlvm
1910 if (visorchipset_clientregwait && !clientregistered)
1913 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1915 /* send init chipset msg */
1916 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
1917 msg.cmd.init_chipset.bus_count = 23;
1918 msg.cmd.init_chipset.switch_count = 0;
1922 /* get saved message count */
1923 if (visorchannel_read(controlvm_channel,
1924 offsetof(struct spar_controlvm_channel_protocol,
1925 saved_crash_message_count),
1926 &localSavedCrashMsgCount, sizeof(u16)) < 0) {
1927 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1928 POSTCODE_SEVERITY_ERR);
/* The channel must advertise exactly the expected message count. */
1932 if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
1933 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
1934 localSavedCrashMsgCount,
1935 POSTCODE_SEVERITY_ERR);
1939 /* get saved crash message offset */
1940 if (visorchannel_read(controlvm_channel,
1941 offsetof(struct spar_controlvm_channel_protocol,
1942 saved_crash_message_offset),
1943 &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
1944 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1945 POSTCODE_SEVERITY_ERR);
1949 /* read create device message for storage bus offset */
1950 if (visorchannel_read(controlvm_channel,
1951 localSavedCrashMsgOffset,
1952 &localCrashCreateBusMsg,
1953 sizeof(struct controlvm_message)) < 0) {
1954 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1955 POSTCODE_SEVERITY_ERR);
1959 /* read create device message for storage device */
1960 if (visorchannel_read(controlvm_channel,
1961 localSavedCrashMsgOffset +
1962 sizeof(struct controlvm_message),
1963 &localCrashCreateDevMsg,
1964 sizeof(struct controlvm_message)) < 0) {
1965 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1966 POSTCODE_SEVERITY_ERR);
1970 /* reuse IOVM create bus message */
1971 if (localCrashCreateBusMsg.cmd.create_bus.channel_addr != 0)
1972 bus_create(&localCrashCreateBusMsg)1;
1974 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1975 POSTCODE_SEVERITY_ERR);
1979 /* reuse create device message for storage device */
1980 if (localCrashCreateDevMsg.cmd.create_device.channel_addr != 0)
1981 my_device_create(&localCrashCreateDevMsg);
1983 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
1984 POSTCODE_SEVERITY_ERR);
1987 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
/* In the crash kernel we only ever poll slowly. */
1992 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1994 queue_delayed_work(periodic_controlvm_workqueue,
1995 &periodic_controlvm_work, poll_jiffies);
/* Callback: report completion of a bus create to the controlvm channel. */
1999 bus_create_response(ulong busNo, int response)
2001 bus_responder(CONTROLVM_BUS_CREATE, busNo, response);
/* Callback: report completion of a bus destroy to the controlvm channel. */
2005 bus_destroy_response(ulong busNo, int response)
2007 bus_responder(CONTROLVM_BUS_DESTROY, busNo, response);
/* Callback: report completion of a device create to the controlvm channel. */
2011 device_create_response(ulong busNo, ulong devNo, int response)
2013 device_responder(CONTROLVM_DEVICE_CREATE, busNo, devNo, response);
/* Callback: report completion of a device destroy to the controlvm channel. */
2017 device_destroy_response(ulong busNo, ulong devNo, int response)
2019 device_responder(CONTROLVM_DEVICE_DESTROY, busNo, devNo, response);
/* Exported callback: report that a device pause completed, moving the
 * device to the standby segment state.
 */
2023 visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
2025 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2026 bus_no, dev_no, response,
2027 segment_state_standby);
2029 EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
/* Callback: report that a device resume completed, moving the device
 * back to the running segment state.
 */
2032 device_resume_response(ulong busNo, ulong devNo, int response)
2034 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2035 busNo, devNo, response,
2036 segment_state_running);
/* Exported accessor: copy the tracked bus info for bus_no into the
 * caller-supplied struct; fails if the bus is unknown.
 */
2040 visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
2042 void *p = findbus(&bus_info_list, bus_no);
2046 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2049 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
/* Exported mutator: attach an opaque bus-driver context pointer to the
 * tracked bus; fails if the bus is unknown.
 */
2052 visorchipset_set_bus_context(ulong bus_no, void *context)
2054 struct visorchipset_bus_info *p = findbus(&bus_info_list, bus_no);
2058 p->bus_driver_context = context;
2061 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
/* Exported accessor: copy the tracked device info for (bus_no, dev_no)
 * into the caller-supplied struct; fails if the device is unknown.
 */
2064 visorchipset_get_device_info(ulong bus_no, ulong dev_no,
2065 struct visorchipset_device_info *dev_info)
2067 void *p = finddevice(&dev_info_list, bus_no, dev_no);
2071 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2074 EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
/* Exported mutator: attach an opaque driver context pointer to the
 * tracked device; fails if the device is unknown.
 */
2077 visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
2079 struct visorchipset_device_info *p =
2080 finddevice(&dev_info_list, bus_no, dev_no);
2084 p->bus_driver_context = context;
2087 EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2089 /* Generic wrapper function for allocating memory from a kmem_cache pool.
2092 visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
2102 /* __GFP_NORETRY means "ok to fail", meaning
2103 * kmem_cache_alloc() can return NULL, implying the caller CAN
2104 * cope with failure. If you do NOT specify __GFP_NORETRY,
2105 * Linux will go to extreme measures to get memory for you
2106 * (like, invoke oom killer), which will probably cripple the
2109 gfp |= __GFP_NORETRY;
2110 p = kmem_cache_alloc(pool, gfp);
/* Track outstanding buffers for leak accounting. */
2114 atomic_inc(&Visorchipset_cache_buffers_in_use);
2118 /* Generic wrapper function for freeing memory from a kmem_cache pool.
2121 visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
/* Decrement the outstanding-buffer count before returning to the pool. */
2126 atomic_dec(&Visorchipset_cache_buffers_in_use);
2127 kmem_cache_free(pool, p);
/* sysfs store: user space reports chipset-ready milestones
 * ("CALLHOMEDISK_MOUNTED" or "MODULES_LOADED"); each sets its slot in
 * chipset_events[], which gates the deferred CHIPSET_READY response.
 */
2130 static ssize_t chipsetready_store(struct device *dev,
2131 struct device_attribute *attr, const char *buf, size_t count)
2135 if (sscanf(buf, "%63s", msgtype) != 1)
2138 if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
2139 chipset_events[0] = 1;
2141 } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
2142 chipset_events[1] = 1;
2148 /* The parahotplug/devicedisabled interface gets called by our support script
2149 * when an SR-IOV device has been shut down. The ID is passed to the script
2150 * and then passed back when the device has been removed.
2152 static ssize_t devicedisabled_store(struct device *dev,
2153 struct device_attribute *attr, const char *buf, size_t count)
2157 if (kstrtouint(buf, 10, &id) != 0)
/* active=0: complete the matching queued disable request. */
2160 parahotplug_request_complete(id, 0);
2164 /* The parahotplug/deviceenabled interface gets called by our support script
2165 * when an SR-IOV device has been recovered. The ID is passed to the script
2166 * and then passed back when the device has been brought back up.
2168 static ssize_t deviceenabled_store(struct device *dev,
2169 struct device_attribute *attr, const char *buf, size_t count)
2173 if (kstrtouint(buf, 10, &id) != 0)
/* active=1: complete the matching queued enable request. */
2176 parahotplug_request_complete(id, 1);
/* Module init: locate and attach to the controlvm channel, set up the
 * char-device interface, the message-buffer kmem cache, the periodic
 * controlvm polling work (or crash-device setup work in a kdump
 * kernel), and register the visorchipset platform device.
 */
2181 visorchipset_init(void)
/* Only run on Unisys s-Par firmware. */
2186 if (!unisys_spar_platform)
2189 memset(&BusDev_Server_Notifiers, 0, sizeof(BusDev_Server_Notifiers));
2190 memset(&BusDev_Client_Notifiers, 0, sizeof(BusDev_Client_Notifiers));
2191 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
2192 memset(&livedump_info, 0, sizeof(livedump_info));
2193 atomic_set(&livedump_info.buffers_in_use, 0);
2195 if (visorchipset_testvnic) {
2196 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
2201 addr = controlvm_get_channel_address();
2204 visorchannel_create_with_lock
2206 sizeof(struct spar_controlvm_channel_protocol),
2207 spar_controlvm_channel_protocol_uuid);
/* Verify the channel header before mapping its payload area. */
2208 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2209 visorchannel_get_header(controlvm_channel))) {
2210 initialize_controlvm_payload();
2212 visorchannel_destroy(controlvm_channel);
2213 controlvm_channel = NULL;
2220 MajorDev = MKDEV(visorchipset_major, 0);
2221 rc = visorchipset_file_init(MajorDev, &controlvm_channel);
2223 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2227 memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
2229 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2231 memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
2233 Putfile_buffer_list_pool =
2234 kmem_cache_create(Putfile_buffer_list_pool_name,
2235 sizeof(struct putfile_buffer_entry),
2236 0, SLAB_HWCACHE_ALIGN, NULL);
2237 if (!Putfile_buffer_list_pool) {
2238 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2242 if (!visorchipset_disable_controlvm) {
2243 /* if booting in a crash kernel */
2244 if (visorchipset_crash_kernel)
2245 INIT_DELAYED_WORK(&periodic_controlvm_work,
2246 setup_crash_devices_work_queue);
2248 INIT_DELAYED_WORK(&periodic_controlvm_work,
2249 controlvm_periodic_work);
2250 periodic_controlvm_workqueue =
2251 create_singlethread_workqueue("visorchipset_controlvm");
2253 if (periodic_controlvm_workqueue == NULL) {
2254 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2259 most_recent_message_jiffies = jiffies;
2260 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2261 rc = queue_delayed_work(periodic_controlvm_workqueue,
2262 &periodic_controlvm_work, poll_jiffies);
2264 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2270 Visorchipset_platform_device.dev.devt = MajorDev;
2271 if (platform_device_register(&Visorchipset_platform_device) < 0) {
2272 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2276 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2280 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2281 POSTCODE_SEVERITY_ERR);
/* Module exit: stop and destroy the polling workqueue, unmap the
 * payload area, free the kmem cache, clear global message headers,
 * destroy the controlvm channel, and tear down the file interface —
 * reversing visorchipset_init().
 */
2287 visorchipset_exit(void)
2289 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
/* If controlvm polling was disabled, there is no workqueue to stop. */
2291 if (visorchipset_disable_controlvm) {
2294 cancel_delayed_work(&periodic_controlvm_work);
2295 flush_workqueue(periodic_controlvm_workqueue);
2296 destroy_workqueue(periodic_controlvm_workqueue);
2297 periodic_controlvm_workqueue = NULL;
2298 destroy_controlvm_payload_info(&controlvm_payload_info);
2300 if (Putfile_buffer_list_pool) {
2301 kmem_cache_destroy(Putfile_buffer_list_pool);
2302 Putfile_buffer_list_pool = NULL;
2305 cleanup_controlvm_structures();
2307 memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
2309 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2311 memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
2313 visorchannel_destroy(controlvm_channel);
2315 visorchipset_file_cleanup();
2316 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
/* Module parameters and module metadata. Each parameter is exposed
 * under its short name (e.g. "testvnic") but backed by a long-named
 * global so other translation units can reference it.
 */
2319 module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2320 MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2321 int visorchipset_testvnic = 0;
2323 module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2324 MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2325 int visorchipset_testvnicclient = 0;
2327 module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2328 MODULE_PARM_DESC(visorchipset_testmsg,
2329 "1 to manufacture the chipset, bus, and switch messages");
2330 int visorchipset_testmsg = 0;
2332 module_param_named(major, visorchipset_major, int, S_IRUGO);
2333 MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
2334 int visorchipset_major = 0;
2336 module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
/* NOTE(review): the MODULE_PARM_DESC name below ("visorchipset_serverreqwait")
 * does not match the variable visorchipset_serverregwait — likely a typo;
 * verify the description attaches to the intended parameter. */
2337 MODULE_PARM_DESC(visorchipset_serverreqwait,
2338 "1 to have the module wait for the visor bus to register");
2339 int visorchipset_serverregwait = 0; /* default is off */
2340 module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2341 MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2342 int visorchipset_clientregwait = 1; /* default is on */
2343 module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2344 MODULE_PARM_DESC(visorchipset_testteardown,
2345 "1 to test teardown of the chipset, bus, and switch");
2346 int visorchipset_testteardown = 0; /* default is off */
2347 module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2349 MODULE_PARM_DESC(visorchipset_disable_controlvm,
2350 "1 to disable polling of controlVm channel");
2351 int visorchipset_disable_controlvm = 0; /* default is off */
2352 module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
2353 MODULE_PARM_DESC(visorchipset_crash_kernel,
2354 "1 means we are running in crash kernel");
2355 int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */
2356 module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2358 MODULE_PARM_DESC(visorchipset_holdchipsetready,
2359 "1 to hold response to CHIPSET_READY");
2360 int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
2361 * response immediately */
2362 module_init(visorchipset_init);
2363 module_exit(visorchipset_exit);
2365 MODULE_AUTHOR("Unisys");
2366 MODULE_LICENSE("GPL");
2367 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2369 MODULE_VERSION(VERSION);