3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
19 #include "visorchipset.h"
20 #include "procobjecttree.h"
21 #include "visorchannel.h"
22 #include "periodic_work.h"
26 #include "controlvmcompletionstatus.h"
27 #include "guestlinuxdebug.h"
29 #include <linux/nls.h>
30 #include <linux/netdevice.h>
31 #include <linux/platform_device.h>
32 #include <linux/uuid.h>
34 #define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
35 #define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
36 * vnic loopback test */
37 #define TEST_VNIC_SWITCHNO 1
38 #define TEST_VNIC_BUSNO 9
40 #define MAX_NAME_SIZE 128
41 #define MAX_IP_SIZE 50
42 #define MAXOUTSTANDINGCHANNELCOMMAND 256
43 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
44 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
46 /* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
47 * we switch to slow polling mode. As soon as we get a controlvm
48 * message, we switch back to fast polling mode.
50 #define MIN_IDLE_SECONDS 10
51 static ulong poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
52 static ulong most_recent_message_jiffies; /* when we got our last
53 * controlvm message */
62 static int serverregistered;
63 static int clientregistered;
65 #define MAX_CHIPSET_EVENTS 2
66 static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
68 static struct delayed_work periodic_controlvm_work;
69 static struct workqueue_struct *periodic_controlvm_workqueue;
70 static DEFINE_SEMAPHORE(notifier_lock);
72 static struct controlvm_message_header g_diag_msg_hdr;
73 static struct controlvm_message_header g_chipset_msg_hdr;
74 static struct controlvm_message_header g_del_dump_msg_hdr;
75 static const uuid_le spar_diag_pool_channel_protocol_uuid =
76 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
77 /* 0xffffff is an invalid Bus/Device number */
78 static ulong g_diagpool_bus_no = 0xffffff;
79 static ulong g_diagpool_dev_no = 0xffffff;
80 static struct controlvm_message_packet g_devicechangestate_packet;
82 /* Only VNIC and VHBA channels are sent to visorclientbus (aka
85 #define FOR_VISORHACKBUS(channel_type_guid) \
86 (((uuid_le_cmp(channel_type_guid,\
87 spar_vnic_channel_protocol_uuid) == 0) ||\
88 (uuid_le_cmp(channel_type_guid,\
89 spar_vhba_channel_protocol_uuid) == 0)))
90 #define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
92 #define is_diagpool_channel(channel_type_guid) \
93 (uuid_le_cmp(channel_type_guid,\
94 spar_diag_pool_channel_protocol_uuid) == 0)
96 static LIST_HEAD(bus_info_list);
97 static LIST_HEAD(dev_info_list);
99 static struct visorchannel *controlvm_channel;
101 struct controlvm_payload_info {
102 u8 __iomem *ptr; /* pointer to base address of payload pool */
103 u64 offset; /* offset from beginning of controlvm
104 * channel to beginning of payload * pool */
105 u32 bytes; /* number of bytes in payload pool */
108 /* Manages the request payload in the controlvm channel */
109 static struct controlvm_payload_info ControlVm_payload_info;
111 static struct channel_header *Test_Vnic_channel;
113 struct livedump_info {
114 struct controlvm_message_header Dumpcapture_header;
115 struct controlvm_message_header Gettextdump_header;
116 struct controlvm_message_header Dumpcomplete_header;
117 BOOL Gettextdump_outstanding;
120 atomic_t buffers_in_use;
123 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
124 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
126 static struct livedump_info LiveDump_info;
128 /* The following globals are used to handle the scenario where we are unable to
129 * offload the payload from a controlvm message due to memory requirements. In
130 * this scenario, we simply stash the controlvm message, then attempt to
131 * process it again the next time controlvm_periodic_work() runs.
133 static struct controlvm_message ControlVm_Pending_Msg;
134 static BOOL ControlVm_Pending_Msg_Valid = FALSE;
136 /* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
137 * TRANSMIT_FILE PutFile payloads.
139 static struct kmem_cache *Putfile_buffer_list_pool;
140 static const char Putfile_buffer_list_pool_name[] =
141 "controlvm_putfile_buffer_list_pool";
143 /* This identifies a data buffer that has been received via a controlvm messages
144 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
146 struct putfile_buffer_entry {
147 struct list_head next; /* putfile_buffer_entry list */
148 struct parser_context *parser_ctx; /* points to input data buffer */
151 /* List of struct putfile_request *, via next_putfile_request member.
152 * Each entry in this list identifies an outstanding TRANSMIT_FILE
155 static LIST_HEAD(Putfile_request_list);
157 /* This describes a buffer and its current state of transfer (e.g., how many
158 * bytes have already been supplied as putfile data, and how many bytes are
159 * remaining) for a putfile_request.
161 struct putfile_active_buffer {
162 /* a payload from a controlvm message, containing a file data buffer */
163 struct parser_context *parser_ctx;
164 /* points within data area of parser_ctx to next byte of data */
166 /* # bytes left from <pnext> to the end of this data buffer */
167 size_t bytes_remaining;
170 #define PUTFILE_REQUEST_SIG 0x0906101302281211
171 /* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
172 * conversation. Structs of this type are dynamically linked into
173 * <Putfile_request_list>.
175 struct putfile_request {
176 u64 sig; /* PUTFILE_REQUEST_SIG */
178 /* header from original TransmitFile request */
179 struct controlvm_message_header controlvm_header;
180 u64 file_request_number; /* from original TransmitFile request */
182 /* link to next struct putfile_request */
183 struct list_head next_putfile_request;
185 /* most-recent sequence number supplied via a controlvm message */
186 u64 data_sequence_number;
188 /* head of putfile_buffer_entry list, which describes the data to be
189 * supplied as putfile data;
190 * - this list is added to when controlvm messages come in that supply
192 * - this list is removed from via the hotplug program that is actually
193 * consuming these buffers to write as file data */
194 struct list_head input_buffer_list;
195 spinlock_t req_list_lock; /* lock for input_buffer_list */
197 /* waiters for input_buffer_list to go non-empty */
198 wait_queue_head_t input_buffer_wq;
200 /* data not yet read within current putfile_buffer_entry */
201 struct putfile_active_buffer active_buf;
203 /* <0 = failed, 0 = in-progress, >0 = successful; */
204 /* note that this must be set with req_list_lock, and if you set <0, */
205 /* it is your responsibility to also free up all of the other objects */
206 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
207 /* before releasing the lock */
208 int completion_status;
211 static atomic_t Visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
213 struct parahotplug_request {
214 struct list_head list;
216 unsigned long expiration;
217 struct controlvm_message msg;
220 static LIST_HEAD(Parahotplug_request_list);
221 static DEFINE_SPINLOCK(Parahotplug_request_list_lock); /* lock for above */
222 static void parahotplug_process_list(void);
224 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
225 * CONTROLVM_REPORTEVENT.
227 static struct visorchipset_busdev_notifiers BusDev_Server_Notifiers;
228 static struct visorchipset_busdev_notifiers BusDev_Client_Notifiers;
230 static void bus_create_response(ulong busNo, int response);
231 static void bus_destroy_response(ulong busNo, int response);
232 static void device_create_response(ulong busNo, ulong devNo, int response);
233 static void device_destroy_response(ulong busNo, ulong devNo, int response);
234 static void device_resume_response(ulong busNo, ulong devNo, int response);
236 static struct visorchipset_busdev_responders BusDev_Responders = {
237 .bus_create = bus_create_response,
238 .bus_destroy = bus_destroy_response,
239 .device_create = device_create_response,
240 .device_destroy = device_destroy_response,
241 .device_pause = visorchipset_device_pause_response,
242 .device_resume = device_resume_response,
245 /* info for /dev/visorchipset */
246 static dev_t MajorDev = -1; /**< indicates major num for device */
248 /* prototypes for attributes */
249 static ssize_t toolaction_show(struct device *dev,
250 struct device_attribute *attr, char *buf);
251 static ssize_t toolaction_store(struct device *dev,
252 struct device_attribute *attr, const char *buf, size_t count);
253 static DEVICE_ATTR_RW(toolaction);
255 static ssize_t boottotool_show(struct device *dev,
256 struct device_attribute *attr, char *buf);
257 static ssize_t boottotool_store(struct device *dev,
258 struct device_attribute *attr, const char *buf, size_t count);
259 static DEVICE_ATTR_RW(boottotool);
261 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
263 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
264 const char *buf, size_t count);
265 static DEVICE_ATTR_RW(error);
267 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
269 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
270 const char *buf, size_t count);
271 static DEVICE_ATTR_RW(textid);
273 static ssize_t remaining_steps_show(struct device *dev,
274 struct device_attribute *attr, char *buf);
275 static ssize_t remaining_steps_store(struct device *dev,
276 struct device_attribute *attr, const char *buf, size_t count);
277 static DEVICE_ATTR_RW(remaining_steps);
279 static ssize_t chipsetready_store(struct device *dev,
280 struct device_attribute *attr, const char *buf, size_t count);
281 static DEVICE_ATTR_WO(chipsetready);
283 static ssize_t devicedisabled_store(struct device *dev,
284 struct device_attribute *attr, const char *buf, size_t count);
285 static DEVICE_ATTR_WO(devicedisabled);
287 static ssize_t deviceenabled_store(struct device *dev,
288 struct device_attribute *attr, const char *buf, size_t count);
289 static DEVICE_ATTR_WO(deviceenabled);
291 static struct attribute *visorchipset_install_attrs[] = {
292 &dev_attr_toolaction.attr,
293 &dev_attr_boottotool.attr,
294 &dev_attr_error.attr,
295 &dev_attr_textid.attr,
296 &dev_attr_remaining_steps.attr,
300 static struct attribute_group visorchipset_install_group = {
302 .attrs = visorchipset_install_attrs
305 static struct attribute *visorchipset_guest_attrs[] = {
306 &dev_attr_chipsetready.attr,
310 static struct attribute_group visorchipset_guest_group = {
312 .attrs = visorchipset_guest_attrs
315 static struct attribute *visorchipset_parahotplug_attrs[] = {
316 &dev_attr_devicedisabled.attr,
317 &dev_attr_deviceenabled.attr,
321 static struct attribute_group visorchipset_parahotplug_group = {
322 .name = "parahotplug",
323 .attrs = visorchipset_parahotplug_attrs
326 static const struct attribute_group *visorchipset_dev_groups[] = {
327 &visorchipset_install_group,
328 &visorchipset_guest_group,
329 &visorchipset_parahotplug_group,
333 /* /sys/devices/platform/visorchipset */
334 static struct platform_device Visorchipset_platform_device = {
335 .name = "visorchipset",
337 .dev.groups = visorchipset_dev_groups,
340 /* Function prototypes */
341 static void controlvm_respond(struct controlvm_message_header *msgHdr,
343 static void controlvm_respond_chipset_init(
344 struct controlvm_message_header *msgHdr, int response,
345 enum ultra_chipset_feature features);
346 static void controlvm_respond_physdev_changestate(
347 struct controlvm_message_header *msgHdr, int response,
348 struct spar_segment_state state);
350 static ssize_t toolaction_show(struct device *dev,
351 struct device_attribute *attr,
356 visorchannel_read(controlvm_channel,
357 offsetof(struct spar_controlvm_channel_protocol,
358 tool_action), &toolAction, sizeof(u8));
359 return scnprintf(buf, PAGE_SIZE, "%u\n", toolAction);
362 static ssize_t toolaction_store(struct device *dev,
363 struct device_attribute *attr,
364 const char *buf, size_t count)
369 if (kstrtou8(buf, 10, &toolAction) != 0)
372 ret = visorchannel_write(controlvm_channel,
373 offsetof(struct spar_controlvm_channel_protocol, tool_action),
374 &toolAction, sizeof(u8));
381 static ssize_t boottotool_show(struct device *dev,
382 struct device_attribute *attr,
385 struct efi_spar_indication efiSparIndication;
387 visorchannel_read(controlvm_channel,
388 offsetof(struct spar_controlvm_channel_protocol,
389 efi_spar_ind), &efiSparIndication,
390 sizeof(struct efi_spar_indication));
391 return scnprintf(buf, PAGE_SIZE, "%u\n",
392 efiSparIndication.boot_to_tool);
395 static ssize_t boottotool_store(struct device *dev,
396 struct device_attribute *attr,
397 const char *buf, size_t count)
400 struct efi_spar_indication efiSparIndication;
402 if (kstrtoint(buf, 10, &val) != 0)
405 efiSparIndication.boot_to_tool = val;
406 ret = visorchannel_write(controlvm_channel,
407 offsetof(struct spar_controlvm_channel_protocol,
409 &(efiSparIndication),
410 sizeof(struct efi_spar_indication));
417 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
422 visorchannel_read(controlvm_channel, offsetof(
423 struct spar_controlvm_channel_protocol, installation_error),
424 &error, sizeof(u32));
425 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
428 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
429 const char *buf, size_t count)
434 if (kstrtou32(buf, 10, &error) != 0)
437 ret = visorchannel_write(controlvm_channel,
438 offsetof(struct spar_controlvm_channel_protocol,
440 &error, sizeof(u32));
446 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
451 visorchannel_read(controlvm_channel, offsetof(
452 struct spar_controlvm_channel_protocol, installation_text_id),
453 &textId, sizeof(u32));
454 return scnprintf(buf, PAGE_SIZE, "%i\n", textId);
457 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
458 const char *buf, size_t count)
463 if (kstrtou32(buf, 10, &textId) != 0)
466 ret = visorchannel_write(controlvm_channel,
467 offsetof(struct spar_controlvm_channel_protocol,
468 installation_text_id),
469 &textId, sizeof(u32));
476 static ssize_t remaining_steps_show(struct device *dev,
477 struct device_attribute *attr, char *buf)
481 visorchannel_read(controlvm_channel,
482 offsetof(struct spar_controlvm_channel_protocol,
483 installation_remaining_steps),
486 return scnprintf(buf, PAGE_SIZE, "%hu\n", remainingSteps);
489 static ssize_t remaining_steps_store(struct device *dev,
490 struct device_attribute *attr, const char *buf, size_t count)
495 if (kstrtou16(buf, 10, &remainingSteps) != 0)
498 ret = visorchannel_write(controlvm_channel,
499 offsetof(struct spar_controlvm_channel_protocol,
500 installation_remaining_steps),
501 &remainingSteps, sizeof(u16));
511 wchar_t unicodeString[] = { 'a', 'b', 'c', 0 };
512 char s[sizeof(unicodeString) * NLS_MAX_CHARSET_SIZE];
513 wchar_t unicode2[99];
515 /* NOTE: Either due to a bug, or feature I don't understand, the
516 * kernel utf8_mbstowcs() and utf_wcstombs() do NOT copy the
517 * trailed NUL byte!! REALLY!!!!! Arrrrgggghhhhh
520 LOGINF("sizeof(wchar_t) = %d", sizeof(wchar_t));
521 LOGINF("utf8_wcstombs=%d",
522 chrs = utf8_wcstombs(s, unicodeString, sizeof(s)));
524 s[chrs] = '\0'; /* GRRRRRRRR */
526 LOGINF("utf8_mbstowcs=%d", chrs = utf8_mbstowcs(unicode2, s, 100));
528 unicode2[chrs] = 0; /* GRRRRRRRR */
529 if (memcmp(unicodeString, unicode2, sizeof(unicodeString)) == 0)
530 LOGINF("strings match... good");
532 LOGINF("strings did not match!!");
537 busInfo_clear(void *v)
539 struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);
541 if (p->proc_object) {
542 visor_proc_DestroyObject(p->proc_object);
543 p->proc_object = NULL;
548 kfree(p->description);
549 p->description = NULL;
551 p->state.created = 0;
552 memset(p, 0, sizeof(struct visorchipset_bus_info));
556 devInfo_clear(void *v)
558 struct visorchipset_device_info *p =
559 (struct visorchipset_device_info *)(v);
561 p->state.created = 0;
562 memset(p, 0, sizeof(struct visorchipset_device_info));
566 check_chipset_events(void)
570 /* Check events to determine if response should be sent */
571 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
572 send_msg &= chipset_events[i];
577 clear_chipset_events(void)
580 /* Clear chipset_events */
581 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
582 chipset_events[i] = 0;
586 visorchipset_register_busdev_server(
587 struct visorchipset_busdev_notifiers *notifiers,
588 struct visorchipset_busdev_responders *responders,
589 struct ultra_vbus_deviceinfo *driver_info)
591 down(¬ifier_lock);
592 if (notifiers == NULL) {
593 memset(&BusDev_Server_Notifiers, 0,
594 sizeof(BusDev_Server_Notifiers));
595 serverregistered = 0; /* clear flag */
597 BusDev_Server_Notifiers = *notifiers;
598 serverregistered = 1; /* set flag */
601 *responders = BusDev_Responders;
603 bus_device_info_init(driver_info, "chipset", "visorchipset",
608 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
611 visorchipset_register_busdev_client(
612 struct visorchipset_busdev_notifiers *notifiers,
613 struct visorchipset_busdev_responders *responders,
614 struct ultra_vbus_deviceinfo *driver_info)
616 down(¬ifier_lock);
617 if (notifiers == NULL) {
618 memset(&BusDev_Client_Notifiers, 0,
619 sizeof(BusDev_Client_Notifiers));
620 clientregistered = 0; /* clear flag */
622 BusDev_Client_Notifiers = *notifiers;
623 clientregistered = 1; /* set flag */
626 *responders = BusDev_Responders;
628 bus_device_info_init(driver_info, "chipset(bolts)",
629 "visorchipset", VERSION, NULL);
632 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
635 cleanup_controlvm_structures(void)
637 struct visorchipset_bus_info *bi, *tmp_bi;
638 struct visorchipset_device_info *di, *tmp_di;
640 list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
642 list_del(&bi->entry);
646 list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
648 list_del(&di->entry);
654 chipset_init(struct controlvm_message *inmsg)
656 static int chipset_inited;
657 enum ultra_chipset_feature features = 0;
658 int rc = CONTROLVM_RESP_SUCCESS;
660 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
661 if (chipset_inited) {
662 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
666 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
668 /* Set features to indicate we support parahotplug (if Command
669 * also supports it). */
671 inmsg->cmd.init_chipset.
672 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
674 /* Set the "reply" bit so Command knows this is a
675 * features-aware driver. */
676 features |= ULTRA_CHIPSET_FEATURE_REPLY;
680 cleanup_controlvm_structures();
681 if (inmsg->hdr.flags.response_expected)
682 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
686 controlvm_init_response(struct controlvm_message *msg,
687 struct controlvm_message_header *msgHdr, int response)
689 memset(msg, 0, sizeof(struct controlvm_message));
690 memcpy(&msg->hdr, msgHdr, sizeof(struct controlvm_message_header));
691 msg->hdr.payload_bytes = 0;
692 msg->hdr.payload_vm_offset = 0;
693 msg->hdr.payload_max_bytes = 0;
695 msg->hdr.flags.failed = 1;
696 msg->hdr.completion_status = (u32) (-response);
701 controlvm_respond(struct controlvm_message_header *msgHdr, int response)
703 struct controlvm_message outmsg;
705 controlvm_init_response(&outmsg, msgHdr, response);
706 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
707 * back the deviceChangeState structure in the packet. */
708 if (msgHdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
709 g_devicechangestate_packet.device_change_state.bus_no ==
711 g_devicechangestate_packet.device_change_state.dev_no ==
713 outmsg.cmd = g_devicechangestate_packet;
714 if (outmsg.hdr.flags.test_message == 1)
717 if (!visorchannel_signalinsert(controlvm_channel,
718 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
724 controlvm_respond_chipset_init(struct controlvm_message_header *msgHdr,
726 enum ultra_chipset_feature features)
728 struct controlvm_message outmsg;
730 controlvm_init_response(&outmsg, msgHdr, response);
731 outmsg.cmd.init_chipset.features = features;
732 if (!visorchannel_signalinsert(controlvm_channel,
733 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
738 static void controlvm_respond_physdev_changestate(
739 struct controlvm_message_header *msgHdr, int response,
740 struct spar_segment_state state)
742 struct controlvm_message outmsg;
744 controlvm_init_response(&outmsg, msgHdr, response);
745 outmsg.cmd.device_change_state.state = state;
746 outmsg.cmd.device_change_state.flags.phys_device = 1;
747 if (!visorchannel_signalinsert(controlvm_channel,
748 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
754 visorchipset_save_message(struct controlvm_message *msg,
755 enum crash_obj_type type)
757 u32 localSavedCrashMsgOffset;
758 u16 localSavedCrashMsgCount;
760 /* get saved message count */
761 if (visorchannel_read(controlvm_channel,
762 offsetof(struct spar_controlvm_channel_protocol,
763 saved_crash_message_count),
764 &localSavedCrashMsgCount, sizeof(u16)) < 0) {
765 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
766 POSTCODE_SEVERITY_ERR);
770 if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
771 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
772 localSavedCrashMsgCount,
773 POSTCODE_SEVERITY_ERR);
777 /* get saved crash message offset */
778 if (visorchannel_read(controlvm_channel,
779 offsetof(struct spar_controlvm_channel_protocol,
780 saved_crash_message_offset),
781 &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
782 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
783 POSTCODE_SEVERITY_ERR);
787 if (type == CRASH_BUS) {
788 if (visorchannel_write(controlvm_channel,
789 localSavedCrashMsgOffset,
791 sizeof(struct controlvm_message)) < 0) {
792 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
793 POSTCODE_SEVERITY_ERR);
797 if (visorchannel_write(controlvm_channel,
798 localSavedCrashMsgOffset +
799 sizeof(struct controlvm_message), msg,
800 sizeof(struct controlvm_message)) < 0) {
801 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
802 POSTCODE_SEVERITY_ERR);
807 EXPORT_SYMBOL_GPL(visorchipset_save_message);
810 bus_responder(enum controlvm_id cmdId, ulong busNo, int response)
812 struct visorchipset_bus_info *p = NULL;
813 BOOL need_clear = FALSE;
815 p = findbus(&bus_info_list, busNo);
820 if ((cmdId == CONTROLVM_BUS_CREATE) &&
821 (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
822 /* undo the row we just created... */
823 delbusdevices(&dev_info_list, busNo);
825 if (cmdId == CONTROLVM_BUS_CREATE)
826 p->state.created = 1;
827 if (cmdId == CONTROLVM_BUS_DESTROY)
831 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
832 return; /* no controlvm response needed */
833 if (p->pending_msg_hdr.id != (u32) cmdId)
835 controlvm_respond(&p->pending_msg_hdr, response);
836 p->pending_msg_hdr.id = CONTROLVM_INVALID;
839 delbusdevices(&dev_info_list, busNo);
844 device_changestate_responder(enum controlvm_id cmdId,
845 ulong busNo, ulong devNo, int response,
846 struct spar_segment_state responseState)
848 struct visorchipset_device_info *p = NULL;
849 struct controlvm_message outmsg;
851 p = finddevice(&dev_info_list, busNo, devNo);
854 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
855 return; /* no controlvm response needed */
856 if (p->pending_msg_hdr.id != cmdId)
859 controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
861 outmsg.cmd.device_change_state.bus_no = busNo;
862 outmsg.cmd.device_change_state.dev_no = devNo;
863 outmsg.cmd.device_change_state.state = responseState;
865 if (!visorchannel_signalinsert(controlvm_channel,
866 CONTROLVM_QUEUE_REQUEST, &outmsg))
869 p->pending_msg_hdr.id = CONTROLVM_INVALID;
873 device_responder(enum controlvm_id cmdId, ulong busNo, ulong devNo,
876 struct visorchipset_device_info *p = NULL;
877 BOOL need_clear = FALSE;
879 p = finddevice(&dev_info_list, busNo, devNo);
883 if (cmdId == CONTROLVM_DEVICE_CREATE)
884 p->state.created = 1;
885 if (cmdId == CONTROLVM_DEVICE_DESTROY)
889 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
890 return; /* no controlvm response needed */
892 if (p->pending_msg_hdr.id != (u32) cmdId)
895 controlvm_respond(&p->pending_msg_hdr, response);
896 p->pending_msg_hdr.id = CONTROLVM_INVALID;
902 bus_epilog(u32 busNo,
903 u32 cmd, struct controlvm_message_header *msgHdr,
904 int response, BOOL needResponse)
906 BOOL notified = FALSE;
908 struct visorchipset_bus_info *pBusInfo = findbus(&bus_info_list, busNo);
914 memcpy(&pBusInfo->pending_msg_hdr, msgHdr,
915 sizeof(struct controlvm_message_header));
917 pBusInfo->pending_msg_hdr.id = CONTROLVM_INVALID;
919 down(¬ifier_lock);
920 if (response == CONTROLVM_RESP_SUCCESS) {
922 case CONTROLVM_BUS_CREATE:
923 /* We can't tell from the bus_create
924 * information which of our 2 bus flavors the
925 * devices on this bus will ultimately end up.
926 * FORTUNATELY, it turns out it is harmless to
927 * send the bus_create to both of them. We can
928 * narrow things down a little bit, though,
929 * because we know: - BusDev_Server can handle
930 * either server or client devices
931 * - BusDev_Client can handle ONLY client
933 if (BusDev_Server_Notifiers.bus_create) {
934 (*BusDev_Server_Notifiers.bus_create) (busNo);
937 if ((!pBusInfo->flags.server) /*client */ &&
938 BusDev_Client_Notifiers.bus_create) {
939 (*BusDev_Client_Notifiers.bus_create) (busNo);
943 case CONTROLVM_BUS_DESTROY:
944 if (BusDev_Server_Notifiers.bus_destroy) {
945 (*BusDev_Server_Notifiers.bus_destroy) (busNo);
948 if ((!pBusInfo->flags.server) /*client */ &&
949 BusDev_Client_Notifiers.bus_destroy) {
950 (*BusDev_Client_Notifiers.bus_destroy) (busNo);
957 /* The callback function just called above is responsible
958 * for calling the appropriate visorchipset_busdev_responders
959 * function, which will call bus_responder()
963 bus_responder(cmd, busNo, response);
968 device_epilog(u32 busNo, u32 devNo, struct spar_segment_state state, u32 cmd,
969 struct controlvm_message_header *msgHdr, int response,
970 BOOL needResponse, BOOL for_visorbus)
972 struct visorchipset_busdev_notifiers *notifiers = NULL;
973 BOOL notified = FALSE;
975 struct visorchipset_device_info *pDevInfo =
976 finddevice(&dev_info_list, busNo, devNo);
978 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
986 notifiers = &BusDev_Server_Notifiers;
988 notifiers = &BusDev_Client_Notifiers;
990 memcpy(&pDevInfo->pending_msg_hdr, msgHdr,
991 sizeof(struct controlvm_message_header));
993 pDevInfo->pending_msg_hdr.id = CONTROLVM_INVALID;
995 down(¬ifier_lock);
998 case CONTROLVM_DEVICE_CREATE:
999 if (notifiers->device_create) {
1000 (*notifiers->device_create) (busNo, devNo);
1004 case CONTROLVM_DEVICE_CHANGESTATE:
1005 /* ServerReady / ServerRunning / SegmentStateRunning */
1006 if (state.alive == segment_state_running.alive &&
1008 segment_state_running.operating) {
1009 if (notifiers->device_resume) {
1010 (*notifiers->device_resume) (busNo,
1015 /* ServerNotReady / ServerLost / SegmentStateStandby */
1016 else if (state.alive == segment_state_standby.alive &&
1018 segment_state_standby.operating) {
1019 /* technically this is standby case
1020 * where server is lost
1022 if (notifiers->device_pause) {
1023 (*notifiers->device_pause) (busNo,
1027 } else if (state.alive == segment_state_paused.alive &&
1029 segment_state_paused.operating) {
1030 /* this is lite pause where channel is
1031 * still valid just 'pause' of it
1033 if (busNo == g_diagpool_bus_no &&
1034 devNo == g_diagpool_dev_no) {
1035 /* this will trigger the
1036 * diag_shutdown.sh script in
1037 * the visorchipset hotplug */
1039 (&Visorchipset_platform_device.dev.
1040 kobj, KOBJ_ONLINE, envp);
1044 case CONTROLVM_DEVICE_DESTROY:
1045 if (notifiers->device_destroy) {
1046 (*notifiers->device_destroy) (busNo, devNo);
1053 /* The callback function just called above is responsible
1054 * for calling the appropriate visorchipset_busdev_responders
1055 * function, which will call device_responder()
1059 device_responder(cmd, busNo, devNo, response);
1064 bus_create(struct controlvm_message *inmsg)
1066 struct controlvm_message_packet *cmd = &inmsg->cmd;
1067 ulong busNo = cmd->create_bus.bus_no;
1068 int rc = CONTROLVM_RESP_SUCCESS;
1069 struct visorchipset_bus_info *pBusInfo = NULL;
1072 pBusInfo = findbus(&bus_info_list, busNo);
1073 if (pBusInfo && (pBusInfo->state.created == 1)) {
1074 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1075 POSTCODE_SEVERITY_ERR);
1076 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1079 pBusInfo = kzalloc(sizeof(struct visorchipset_bus_info), GFP_KERNEL);
1080 if (pBusInfo == NULL) {
1081 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1082 POSTCODE_SEVERITY_ERR);
1083 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1087 INIT_LIST_HEAD(&pBusInfo->entry);
1088 pBusInfo->bus_no = busNo;
1089 pBusInfo->dev_no = cmd->create_bus.dev_count;
1091 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1093 if (inmsg->hdr.flags.test_message == 1)
1094 pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1096 pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1098 pBusInfo->flags.server = inmsg->hdr.flags.server;
1099 pBusInfo->chan_info.channel_addr = cmd->create_bus.channel_addr;
1100 pBusInfo->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1101 pBusInfo->chan_info.channel_type_uuid =
1102 cmd->create_bus.bus_data_type_uuid;
1103 pBusInfo->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
1105 list_add(&pBusInfo->entry, &bus_info_list);
1107 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1110 bus_epilog(busNo, CONTROLVM_BUS_CREATE, &inmsg->hdr,
1111 rc, inmsg->hdr.flags.response_expected == 1);
1115 bus_destroy(struct controlvm_message *inmsg)
1117 struct controlvm_message_packet *cmd = &inmsg->cmd;
1118 ulong busNo = cmd->destroy_bus.bus_no;
1119 struct visorchipset_bus_info *pBusInfo;
1120 int rc = CONTROLVM_RESP_SUCCESS;
1122 pBusInfo = findbus(&bus_info_list, busNo);
1124 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1127 if (pBusInfo->state.created == 0) {
1128 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1133 bus_epilog(busNo, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
1134 rc, inmsg->hdr.flags.response_expected == 1);
/*
 * bus_configure() - handle a CONTROLVM_BUS_CONFIGURE message: validate the
 * target bus, reject if a prior message is still pending on it, then record
 * the guest partition handle/uuid and bus name from the parsed payload.
 * NOTE(review): listing is elided; goto/brace lines are not visible here.
 */
1138 bus_configure(struct controlvm_message *inmsg,
1139 struct parser_context *parser_ctx)
1141 struct controlvm_message_packet *cmd = &inmsg->cmd;
1142 ulong busNo = cmd->configure_bus.bus_no;
1143 struct visorchipset_bus_info *pBusInfo = NULL;
1144 int rc = CONTROLVM_RESP_SUCCESS;
/* NOTE(review): redundant re-assignment — busNo was initialized above */
1147 busNo = cmd->configure_bus.bus_no;
1148 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1150 pBusInfo = findbus(&bus_info_list, busNo);
/* bus not found */
1152 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1153 POSTCODE_SEVERITY_ERR);
1154 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
/* bus exists but was never created */
1157 if (pBusInfo->state.created == 0) {
1158 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1159 POSTCODE_SEVERITY_ERR);
1160 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1163 /* TBD - add this check to other commands also... */
/* a previous controlvm message for this bus is still awaiting response */
1164 if (pBusInfo->pending_msg_hdr.id != CONTROLVM_INVALID) {
1165 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1166 POSTCODE_SEVERITY_ERR);
1167 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
/* success path: capture partition identity and the bus name string */
1171 pBusInfo->partition_handle = cmd->configure_bus.guest_handle;
1172 pBusInfo->partition_uuid = parser_id_get(parser_ctx);
1173 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1174 pBusInfo->name = parser_string_get(parser_ctx);
1176 visorchannel_uuid_id(&pBusInfo->partition_uuid, s);
1177 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1179 bus_epilog(busNo, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
1180 rc, inmsg->hdr.flags.response_expected == 1);
/*
 * my_device_create() - handle a CONTROLVM_DEVICE_CREATE message: validate
 * the (bus, dev) pair, allocate and populate a visorchipset_device_info,
 * add it to dev_info_list, and complete via device_epilog().
 * NOTE(review): listing is elided; error gotos/braces are not all visible.
 */
1184 my_device_create(struct controlvm_message *inmsg)
1186 struct controlvm_message_packet *cmd = &inmsg->cmd;
1187 ulong busNo = cmd->create_device.bus_no;
1188 ulong devNo = cmd->create_device.dev_no;
1189 struct visorchipset_device_info *pDevInfo = NULL;
1190 struct visorchipset_bus_info *pBusInfo = NULL;
1191 int rc = CONTROLVM_RESP_SUCCESS;
/* duplicate create: device already exists and is marked created */
1193 pDevInfo = finddevice(&dev_info_list, busNo, devNo);
1194 if (pDevInfo && (pDevInfo->state.created == 1)) {
1195 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1196 POSTCODE_SEVERITY_ERR);
1197 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
/* the parent bus must exist and have been created */
1200 pBusInfo = findbus(&bus_info_list, busNo);
1202 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1203 POSTCODE_SEVERITY_ERR);
1204 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1207 if (pBusInfo->state.created == 0) {
1208 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1209 POSTCODE_SEVERITY_ERR);
1210 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
/* allocate tracking struct for the new device */
1213 pDevInfo = kzalloc(sizeof(struct visorchipset_device_info), GFP_KERNEL);
1214 if (pDevInfo == NULL) {
1215 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1216 POSTCODE_SEVERITY_ERR);
1217 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1221 INIT_LIST_HEAD(&pDevInfo->entry);
1222 pDevInfo->bus_no = busNo;
1223 pDevInfo->dev_no = devNo;
1224 pDevInfo->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1225 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
1226 POSTCODE_SEVERITY_INFO);
/* test messages carry local-test addresses; real ones are physical */
1228 if (inmsg->hdr.flags.test_message == 1)
1229 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1231 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1232 pDevInfo->chan_info.channel_addr = cmd->create_device.channel_addr;
1233 pDevInfo->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1234 pDevInfo->chan_info.channel_type_uuid =
1235 cmd->create_device.data_type_uuid;
1236 pDevInfo->chan_info.intr = cmd->create_device.intr;
1237 list_add(&pDevInfo->entry, &dev_info_list);
1238 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, devNo, busNo,
1239 POSTCODE_SEVERITY_INFO);
1241 /* get the bus and devNo for DiagPool channel */
/* remember DiagPool channel location in globals for later use */
1243 is_diagpool_channel(pDevInfo->chan_info.channel_type_uuid)) {
1244 g_diagpool_bus_no = busNo;
1245 g_diagpool_dev_no = devNo;
/* respond / notify bus driver via common device epilog */
1247 device_epilog(busNo, devNo, segment_state_running,
1248 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
1249 inmsg->hdr.flags.response_expected == 1,
1250 FOR_VISORBUS(pDevInfo->chan_info.channel_type_uuid));
/*
 * my_device_changestate() - handle CONTROLVM_DEVICE_CHANGESTATE for a
 * virtual device: validate the device exists and was created, then pass
 * the requested segment state to device_epilog().
 * NOTE(review): elided listing — some braces/gotos are not visible.
 */
1254 my_device_changestate(struct controlvm_message *inmsg)
1256 struct controlvm_message_packet *cmd = &inmsg->cmd;
1257 ulong busNo = cmd->device_change_state.bus_no;
1258 ulong devNo = cmd->device_change_state.dev_no;
1259 struct spar_segment_state state = cmd->device_change_state.state;
1260 struct visorchipset_device_info *pDevInfo = NULL;
1261 int rc = CONTROLVM_RESP_SUCCESS;
1263 pDevInfo = finddevice(&dev_info_list, busNo, devNo);
/* device not found */
1265 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1266 POSTCODE_SEVERITY_ERR);
1267 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
/* device exists but was never created */
1270 if (pDevInfo->state.created == 0) {
1271 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1272 POSTCODE_SEVERITY_ERR);
1273 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
/* only drive the state change when validation succeeded */
1276 if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1277 device_epilog(busNo, devNo, state, CONTROLVM_DEVICE_CHANGESTATE,
1279 inmsg->hdr.flags.response_expected == 1,
1281 pDevInfo->chan_info.channel_type_uuid));
/*
 * my_device_destroy() - handle CONTROLVM_DEVICE_DESTROY: validate the
 * device, then let device_epilog() respond and complete the teardown.
 * NOTE(review): elided listing — some braces/gotos are not visible.
 */
1285 my_device_destroy(struct controlvm_message *inmsg)
1287 struct controlvm_message_packet *cmd = &inmsg->cmd;
1288 ulong busNo = cmd->destroy_device.bus_no;
1289 ulong devNo = cmd->destroy_device.dev_no;
1290 struct visorchipset_device_info *pDevInfo = NULL;
1291 int rc = CONTROLVM_RESP_SUCCESS;
1293 pDevInfo = finddevice(&dev_info_list, busNo, devNo);
/* unknown device */
1295 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
/* never created -> destroy is already done */
1298 if (pDevInfo->state.created == 0) {
1299 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1303 if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1304 device_epilog(busNo, devNo, segment_state_running,
1305 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
1306 inmsg->hdr.flags.response_expected == 1,
1308 pDevInfo->chan_info.channel_type_uuid));
1311 /* When provided with the physical address of the controlvm channel
1312 * (phys_addr), the offset to the payload area we need to manage
1313 * (offset), and the size of this payload area (bytes), fills in the
1314 * controlvm_payload_info struct. Returns TRUE for success or FALSE
/*
 * Validates its arguments, ioremaps the payload region, and records
 * offset/size/pointer in *info.  NOTE(review): elided listing — the
 * failure/cleanup path is only partially visible.
 */
1318 initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
1319 struct controlvm_payload_info *info)
1321 u8 __iomem *payload = NULL;
1322 int rc = CONTROLVM_RESP_SUCCESS;
/* NULL info pointer -> invalid payload */
1325 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1328 memset(info, 0, sizeof(struct controlvm_payload_info));
/* a zero offset or size means there is no payload area to map */
1329 if ((offset == 0) || (bytes == 0)) {
1330 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
/* map the payload region (cacheable) into kernel virtual space */
1333 payload = ioremap_cache(phys_addr + offset, bytes);
1334 if (payload == NULL) {
1335 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1339 info->offset = offset;
1340 info->bytes = bytes;
1341 info->ptr = payload;
/* cleanup path: unmap on failure (body elided in this listing) */
1345 if (payload != NULL) {
/*
 * destroy_controlvm_payload_info() - undo initialize_controlvm_payload_info:
 * unmap the payload (iounmap presumably on the elided line) and clear *info.
 */
1354 destroy_controlvm_payload_info(struct controlvm_payload_info *info)
1356 if (info->ptr != NULL) {
1360 memset(info, 0, sizeof(struct controlvm_payload_info));
/*
 * initialize_controlvm_payload() - read the request payload offset and size
 * out of the controlvm channel header, then map the payload area into
 * ControlVm_payload_info via initialize_controlvm_payload_info().
 */
1364 initialize_controlvm_payload(void)
1366 HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
1367 u64 payloadOffset = 0;
1368 u32 payloadBytes = 0;
/* offset of the request payload area, read from the channel header */
1370 if (visorchannel_read(controlvm_channel,
1371 offsetof(struct spar_controlvm_channel_protocol,
1372 request_payload_offset),
1373 &payloadOffset, sizeof(payloadOffset)) < 0) {
1374 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1375 POSTCODE_SEVERITY_ERR);
/* size of the request payload area */
1378 if (visorchannel_read(controlvm_channel,
1379 offsetof(struct spar_controlvm_channel_protocol,
1380 request_payload_bytes),
1381 &payloadBytes, sizeof(payloadBytes)) < 0) {
1382 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1383 POSTCODE_SEVERITY_ERR);
/* NOTE(review): return value of the initializer is discarded here */
1386 initialize_controlvm_payload_info(phys_addr,
1387 payloadOffset, payloadBytes,
1388 &ControlVm_payload_info);
1391 /* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1392 * Returns CONTROLVM_RESP_xxx code.
/* Emits a KOBJ_ONLINE uevent on the visorchipset platform device. */
1395 visorchipset_chipset_ready(void)
1397 kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
1398 return CONTROLVM_RESP_SUCCESS;
1400 EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
/*
 * visorchipset_chipset_selftest() - emit a KOBJ_CHANGE uevent carrying
 * SPARSP_SELFTEST=1 so userspace can run the chipset self-test.
 */
1403 visorchipset_chipset_selftest(void)
1405 char env_selftest[20];
1406 char *envp[] = { env_selftest, NULL };
1408 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1410 kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1411 return CONTROLVM_RESP_SUCCESS;
1413 EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1415 /* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1416 * Returns CONTROLVM_RESP_xxx code.
/* Emits a KOBJ_OFFLINE uevent on the visorchipset platform device. */
1419 visorchipset_chipset_notready(void)
1421 kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1422 return CONTROLVM_RESP_SUCCESS;
1424 EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
/*
 * chipset_ready() - CONTROLVM_CHIPSET_READY handler: fire the online
 * uevent, then either respond immediately or (when holdchipsetready is
 * set) stash the header so the response is sent after userspace reports
 * modules loaded / disks mounted (see chipsetready_store()).
 */
1427 chipset_ready(struct controlvm_message_header *msgHdr)
1429 int rc = visorchipset_chipset_ready();
1431 if (rc != CONTROLVM_RESP_SUCCESS)
/* normal case: respond right away */
1433 if (msgHdr->flags.response_expected && !visorchipset_holdchipsetready)
1434 controlvm_respond(msgHdr, rc);
1435 if (msgHdr->flags.response_expected && visorchipset_holdchipsetready) {
1436 /* Send CHIPSET_READY response when all modules have been loaded
1437 * and disks mounted for the partition
/* deferred: saved header is consumed by controlvm_periodic_work() */
1439 g_chipset_msg_hdr = *msgHdr;
/*
 * chipset_selftest() - CONTROLVM_CHIPSET_SELFTEST handler: kick off the
 * selftest uevent and respond if the sender expects a response.
 */
1444 chipset_selftest(struct controlvm_message_header *msgHdr)
1446 int rc = visorchipset_chipset_selftest();
1448 if (rc != CONTROLVM_RESP_SUCCESS)
1450 if (msgHdr->flags.response_expected)
1451 controlvm_respond(msgHdr, rc);
/*
 * chipset_notready() - CONTROLVM_CHIPSET_STOP handler: fire the offline
 * uevent and respond if the sender expects a response.
 */
1455 chipset_notready(struct controlvm_message_header *msgHdr)
1457 int rc = visorchipset_chipset_notready();
1459 if (rc != CONTROLVM_RESP_SUCCESS)
1461 if (msgHdr->flags.response_expected)
1462 controlvm_respond(msgHdr, rc);
1465 /* This is your "one-stop" shop for grabbing the next message from the
1466 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
/*
 * Pops one message from the event queue into *msg.  NOTE(review): elided
 * listing — the return statements and test-message handling are partially
 * missing; presumably returns TRUE when a message was read.
 */
1469 read_controlvm_event(struct controlvm_message *msg)
1471 if (visorchannel_signalremove(controlvm_channel,
1472 CONTROLVM_QUEUE_EVENT, msg)) {
/* test messages are flagged in the header for local-address handling */
1474 if (msg->hdr.flags.test_message == 1)
1482 * The general parahotplug flow works as follows. The visorchipset
1483 * driver receives a DEVICE_CHANGESTATE message from Command
1484 * specifying a physical device to enable or disable. The CONTROLVM
1485 * message handler calls parahotplug_process_message, which then adds
1486 * the message to a global list and kicks off a udev event which
1487 * causes a user level script to enable or disable the specified
1488 * device. The udev script then writes to
1489 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1490 * to get called, at which point the appropriate CONTROLVM message is
1491 * retrieved from the list and responded to.
/* how long a parahotplug request may sit unanswered before timing out */
1494 #define PARAHOTPLUG_TIMEOUT_MS 2000
1497 * Generate unique int to match an outstanding CONTROLVM message with a
1498 * udev script /proc response
1501 parahotplug_next_id(void)
1503 static atomic_t id = ATOMIC_INIT(0);
/* atomic counter guarantees uniqueness across concurrent callers */
1505 return atomic_inc_return(&id);
1509 * Returns the time (in jiffies) when a CONTROLVM message on the list
1510 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1512 static unsigned long
1513 parahotplug_next_expiration(void)
1515 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1519 * Create a parahotplug_request, which is basically a wrapper for a
1520 * CONTROLVM_MESSAGE that we can stick on a list
1522 static struct parahotplug_request *
1523 parahotplug_request_create(struct controlvm_message *msg)
1525 struct parahotplug_request *req;
/* __GFP_NORETRY: allocation may fail instead of invoking the OOM killer */
1527 req = kmalloc(sizeof(*req), GFP_KERNEL|__GFP_NORETRY);
/* assign a unique id and a timeout deadline; the message copy itself
 * appears on an elided line of this listing */
1531 req->id = parahotplug_next_id();
1532 req->expiration = parahotplug_next_expiration();
1539 * Free a parahotplug_request.
1542 parahotplug_request_destroy(struct parahotplug_request *req)
1548 * Cause uevent to run the user level script to do the disable/enable
1549 * specified in (the CONTROLVM message in) the specified
1550 * parahotplug_request
1553 parahotplug_request_kickoff(struct parahotplug_request *req)
1555 struct controlvm_message_packet *cmd = &req->msg.cmd;
/* NOTE(review): fixed 40-byte buffers filled with unchecked sprintf();
 * the format strings below appear bounded, but snprintf would be safer */
1556 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1559 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1562 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1563 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1564 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1565 cmd->device_change_state.state.active);
1566 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1567 cmd->device_change_state.bus_no);
/* dev_no encodes PCI device/function: upper bits device, low 3 function */
1568 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1569 cmd->device_change_state.dev_no >> 3);
1570 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1571 cmd->device_change_state.dev_no & 0x7);
1573 kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1578 * Remove any request from the list that's been on there too long and
1579 * respond with an error.
1582 parahotplug_process_list(void)
1584 struct list_head *pos = NULL;
1585 struct list_head *tmp = NULL;
1587 spin_lock(&Parahotplug_request_list_lock);
/* _safe iteration: entries may be removed (on elided lines) while walking */
1589 list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1590 struct parahotplug_request *req =
1591 list_entry(pos, struct parahotplug_request, list);
1592 if (time_after_eq(jiffies, req->expiration)) {
/* expired: answer with a udev-timeout error, then free the request */
1594 if (req->msg.hdr.flags.response_expected)
1595 controlvm_respond_physdev_changestate(
1597 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1598 req->msg.cmd.device_change_state.state);
1599 parahotplug_request_destroy(req);
1603 spin_unlock(&Parahotplug_request_list_lock);
1607 * Called from the /proc handler, which means the user script has
1608 * finished the enable/disable. Find the matching identifier, and
1609 * respond to the CONTROLVM message with success.
1612 parahotplug_request_complete(int id, u16 active)
1614 struct list_head *pos = NULL;
1615 struct list_head *tmp = NULL;
1617 spin_lock(&Parahotplug_request_list_lock);
1619 /* Look for a request matching "id". */
1620 list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1621 struct parahotplug_request *req =
1622 list_entry(pos, struct parahotplug_request, list);
1623 if (req->id == id) {
1624 /* Found a match. Remove it from the list and
/* drop the lock before responding (removal happens on elided lines) */
1628 spin_unlock(&Parahotplug_request_list_lock);
1629 req->msg.cmd.device_change_state.state.active = active;
1630 if (req->msg.hdr.flags.response_expected)
1631 controlvm_respond_physdev_changestate(
1632 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1633 req->msg.cmd.device_change_state.state);
1634 parahotplug_request_destroy(req);
/* no match found: fall through and release the lock */
1639 spin_unlock(&Parahotplug_request_list_lock);
1644 * Enables or disables a PCI device by kicking off a udev script
1647 parahotplug_process_message(struct controlvm_message *inmsg)
1649 struct parahotplug_request *req;
1651 req = parahotplug_request_create(inmsg);
/* enable: respond immediately (see rationale below); disable: queue and
 * wait for the udev script to call back via /proc */
1656 if (inmsg->cmd.device_change_state.state.active) {
1657 /* For enable messages, just respond with success
1658 * right away. This is a bit of a hack, but there are
1659 * issues with the early enable messages we get (with
1660 * either the udev script not detecting that the device
1661 * is up, or not getting called at all). Fortunately
1662 * the messages that get lost don't matter anyway, as
1663 * devices are automatically enabled at
1666 parahotplug_request_kickoff(req);
1667 controlvm_respond_physdev_changestate(&inmsg->hdr,
1668 CONTROLVM_RESP_SUCCESS, inmsg->cmd.
1669 device_change_state.state);
1670 parahotplug_request_destroy(req);
1672 /* For disable messages, add the request to the
1673 * request list before kicking off the udev script. It
1674 * won't get responded to until the script has
1675 * indicated it's done.
1677 spin_lock(&Parahotplug_request_list_lock);
1678 list_add_tail(&(req->list), &Parahotplug_request_list);
1679 spin_unlock(&Parahotplug_request_list_lock);
1681 parahotplug_request_kickoff(req);
1685 /* Process a controlvm message.
1687 * FALSE - this function will return FALSE only in the case where the
1688 * controlvm message was NOT processed, but processing must be
1689 * retried before reading the next controlvm message; a
1690 * scenario where this can occur is when we need to throttle
1691 * the allocation of memory in which to copy out controlvm
1693 * TRUE - processing of the controlvm message completed,
1694 * either successfully or with an error.
1697 handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
1699 struct controlvm_message_packet *cmd = &inmsg.cmd;
1700 u64 parametersAddr = 0;
1701 u32 parametersBytes = 0;
1702 struct parser_context *parser_ctx = NULL;
1703 BOOL isLocalAddr = FALSE;
1704 struct controlvm_message ackmsg;
1706 /* create parsing context if necessary */
1707 isLocalAddr = (inmsg.hdr.flags.test_message == 1);
1708 if (channel_addr == 0)
/* payload parameters live at an offset from the channel base */
1710 parametersAddr = channel_addr + inmsg.hdr.payload_vm_offset;
1711 parametersBytes = inmsg.hdr.payload_bytes;
1713 /* Parameter and channel addresses within test messages actually lie
1714 * within our OS-controlled memory. We need to know that, because it
1715 * makes a difference in how we compute the virtual address.
1717 if (parametersAddr != 0 && parametersBytes != 0) {
/* parser may ask us to retry later if memory is tight (throttling) */
1721 parser_init_byte_stream(parametersAddr, parametersBytes,
1722 isLocalAddr, &retry);
1723 if (!parser_ctx && retry)
/* ACK the message on the channel before dispatching it */
1728 controlvm_init_response(&ackmsg, &inmsg.hdr,
1729 CONTROLVM_RESP_SUCCESS);
1730 if (controlvm_channel)
1731 visorchannel_signalinsert(controlvm_channel,
1732 CONTROLVM_QUEUE_ACK,
/* dispatch on the message id */
1735 switch (inmsg.hdr.id) {
1736 case CONTROLVM_CHIPSET_INIT:
1737 chipset_init(&inmsg);
1739 case CONTROLVM_BUS_CREATE:
1742 case CONTROLVM_BUS_DESTROY:
1743 bus_destroy(&inmsg);
1745 case CONTROLVM_BUS_CONFIGURE:
1746 bus_configure(&inmsg, parser_ctx);
1748 case CONTROLVM_DEVICE_CREATE:
1749 my_device_create(&inmsg);
1751 case CONTROLVM_DEVICE_CHANGESTATE:
/* physical devices go through the parahotplug/udev path instead */
1752 if (cmd->device_change_state.flags.phys_device) {
1753 parahotplug_process_message(&inmsg);
1755 /* save the hdr and cmd structures for later use */
1756 /* when sending back the response to Command */
1757 my_device_changestate(&inmsg);
1758 g_diag_msg_hdr = inmsg.hdr;
1759 g_devicechangestate_packet = inmsg.cmd;
1763 case CONTROLVM_DEVICE_DESTROY:
1764 my_device_destroy(&inmsg);
1766 case CONTROLVM_DEVICE_CONFIGURE:
1767 /* no op for now, just send a respond that we passed */
1768 if (inmsg.hdr.flags.response_expected)
1769 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1771 case CONTROLVM_CHIPSET_READY:
1772 chipset_ready(&inmsg.hdr);
1774 case CONTROLVM_CHIPSET_SELFTEST:
1775 chipset_selftest(&inmsg.hdr);
1777 case CONTROLVM_CHIPSET_STOP:
1778 chipset_notready(&inmsg.hdr);
/* unknown ids: reply with an error if a response is expected */
1781 if (inmsg.hdr.flags.response_expected)
1782 controlvm_respond(&inmsg.hdr,
1783 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
/* tear down the parsing context regardless of outcome */
1787 if (parser_ctx != NULL) {
1788 parser_done(parser_ctx);
/*
 * controlvm_get_channel_address() - ask the hypervisor (via vmcall) for the
 * physical address of the controlvm channel; body mostly elided here.
 */
1794 static HOSTADDRESS controlvm_get_channel_address(void)
1799 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
/*
 * controlvm_periodic_work() - main polling loop, run from a delayed-work
 * item.  Drains the controlvm channel queues, dispatches each message via
 * handle_command(), handles the deferred CHIPSET_READY response, ages the
 * parahotplug list, and adapts the polling rate (fast while busy, slow
 * after MIN_IDLE_SECONDS of quiet) before re-queueing itself.
 */
1806 controlvm_periodic_work(struct work_struct *work)
1808 struct controlvm_message inmsg;
1809 BOOL gotACommand = FALSE;
1810 BOOL handle_command_failed = FALSE;
1811 static u64 Poll_Count;
1813 /* make sure visorbus server is registered for controlvm callbacks */
1814 if (visorchipset_serverregwait && !serverregistered)
1816 /* make sure visorclientbus server is regsitered for controlvm
1819 if (visorchipset_clientregwait && !clientregistered)
/* skip most of the work for the first 250 polls (warm-up throttle) */
1823 if (Poll_Count >= 250)
1828 /* Check events to determine if response to CHIPSET_READY
/* deferred CHIPSET_READY: respond once userspace reported both events */
1831 if (visorchipset_holdchipsetready &&
1832 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
1833 if (check_chipset_events() == 1) {
1834 controlvm_respond(&g_chipset_msg_hdr, 0);
1835 clear_chipset_events();
1836 memset(&g_chipset_msg_hdr, 0,
1837 sizeof(struct controlvm_message_header));
/* drain (and discard) anything on the response queue */
1841 while (visorchannel_signalremove(controlvm_channel,
1842 CONTROLVM_QUEUE_RESPONSE,
1846 if (ControlVm_Pending_Msg_Valid) {
1847 /* we throttled processing of a prior
1848 * msg, so try to process it again
1849 * rather than reading a new one
1851 inmsg = ControlVm_Pending_Msg;
1852 ControlVm_Pending_Msg_Valid = FALSE;
1855 gotACommand = read_controlvm_event(&inmsg);
/* process messages until the queue is empty or we must throttle */
1858 handle_command_failed = FALSE;
1859 while (gotACommand && (!handle_command_failed)) {
1860 most_recent_message_jiffies = jiffies;
1861 if (handle_command(inmsg,
1862 visorchannel_get_physaddr
1863 (controlvm_channel)))
1864 gotACommand = read_controlvm_event(&inmsg);
1866 /* this is a scenario where throttling
1867 * is required, but probably NOT an
1868 * error...; we stash the current
1869 * controlvm msg so we will attempt to
1870 * reprocess it on our next loop
1872 handle_command_failed = TRUE;
1873 ControlVm_Pending_Msg = inmsg;
1874 ControlVm_Pending_Msg_Valid = TRUE;
1878 /* parahotplug_worker */
1879 parahotplug_process_list();
/* adapt the polling period based on recent channel activity */
1883 if (time_after(jiffies,
1884 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
1885 /* it's been longer than MIN_IDLE_SECONDS since we
1886 * processed our last controlvm message; slow down the
1889 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1890 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1892 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1893 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
/* re-arm ourselves */
1896 queue_delayed_work(periodic_controlvm_workqueue,
1897 &periodic_controlvm_work, poll_jiffies);
/*
 * setup_crash_devices_work_queue() - alternative work handler used when
 * booting in a crash (kdump) kernel: instead of polling, replay the saved
 * CHIPSET_INIT / BUS_CREATE / DEVICE_CREATE messages stashed in the
 * controlvm channel so the crash kernel can reach its storage device.
 */
1901 setup_crash_devices_work_queue(struct work_struct *work)
1904 struct controlvm_message localCrashCreateBusMsg;
1905 struct controlvm_message localCrashCreateDevMsg;
1906 struct controlvm_message msg;
1907 u32 localSavedCrashMsgOffset;
1908 u16 localSavedCrashMsgCount;
1910 /* make sure visorbus server is registered for controlvm callbacks */
1911 if (visorchipset_serverregwait && !serverregistered)
1914 /* make sure visorclientbus server is regsitered for controlvm
1917 if (visorchipset_clientregwait && !clientregistered)
1920 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1922 /* send init chipset msg */
/* NOTE(review): msg is stack-allocated and only partially initialized on
 * the visible lines (bus_count/switch_count) — other fields may be stale */
1923 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
1924 msg.cmd.init_chipset.bus_count = 23;
1925 msg.cmd.init_chipset.switch_count = 0;
1929 /* get saved message count */
1930 if (visorchannel_read(controlvm_channel,
1931 offsetof(struct spar_controlvm_channel_protocol,
1932 saved_crash_message_count),
1933 &localSavedCrashMsgCount, sizeof(u16)) < 0) {
1934 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1935 POSTCODE_SEVERITY_ERR);
/* the channel must hold exactly the expected number of crash messages */
1939 if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
1940 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
1941 localSavedCrashMsgCount,
1942 POSTCODE_SEVERITY_ERR);
1946 /* get saved crash message offset */
1947 if (visorchannel_read(controlvm_channel,
1948 offsetof(struct spar_controlvm_channel_protocol,
1949 saved_crash_message_offset),
1950 &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
1951 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1952 POSTCODE_SEVERITY_ERR);
1956 /* read create device message for storage bus offset */
1957 if (visorchannel_read(controlvm_channel,
1958 localSavedCrashMsgOffset,
1959 &localCrashCreateBusMsg,
1960 sizeof(struct controlvm_message)) < 0) {
1961 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1962 POSTCODE_SEVERITY_ERR);
1966 /* read create device message for storage device */
1967 if (visorchannel_read(controlvm_channel,
1968 localSavedCrashMsgOffset +
1969 sizeof(struct controlvm_message),
1970 &localCrashCreateDevMsg,
1971 sizeof(struct controlvm_message)) < 0) {
1972 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1973 POSTCODE_SEVERITY_ERR);
1977 /* reuse IOVM create bus message */
1978 if (localCrashCreateBusMsg.cmd.create_bus.channel_addr != 0)
1979 bus_create(&localCrashCreateBusMsg)
1981 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1982 POSTCODE_SEVERITY_ERR);
1986 /* reuse create device message for storage device */
1987 if (localCrashCreateDevMsg.cmd.create_device.channel_addr != 0)
1988 my_device_create(&localCrashCreateDevMsg);
1990 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
1991 POSTCODE_SEVERITY_ERR);
1994 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
/* error path (elided label): fall back to slow polling of the channel */
1999 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2001 queue_delayed_work(periodic_controlvm_workqueue,
2002 &periodic_controlvm_work, poll_jiffies);
/* Forward a bus-create completion to the common bus responder. */
2006 bus_create_response(ulong busNo, int response)
2008 bus_responder(CONTROLVM_BUS_CREATE, busNo, response);
/* Forward a bus-destroy completion to the common bus responder. */
2012 bus_destroy_response(ulong busNo, int response)
2014 bus_responder(CONTROLVM_BUS_DESTROY, busNo, response);
/* Forward a device-create completion to the common device responder. */
2018 device_create_response(ulong busNo, ulong devNo, int response)
2020 device_responder(CONTROLVM_DEVICE_CREATE, busNo, devNo, response);
/* Forward a device-destroy completion to the common device responder. */
2024 device_destroy_response(ulong busNo, ulong devNo, int response)
2026 device_responder(CONTROLVM_DEVICE_DESTROY, busNo, devNo, response);
/*
 * visorchipset_device_pause_response() - exported hook for bus drivers to
 * report completion of a pause (change to standby) request.
 */
2030 visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
2033 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2034 bus_no, dev_no, response,
2035 segment_state_standby);
2037 EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
/* Report completion of a resume (change to running) request. */
2040 device_resume_response(ulong busNo, ulong devNo, int response)
2042 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2043 busNo, devNo, response,
2044 segment_state_running);
/*
 * visorchipset_get_bus_info() - copy the tracked info for bus_no into
 * *bus_info; the not-found return path is on an elided line.
 */
2048 visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
2050 void *p = findbus(&bus_info_list, bus_no);
2054 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2057 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
/* Attach an opaque bus-driver context pointer to the tracked bus entry. */
2060 visorchipset_set_bus_context(ulong bus_no, void *context)
2062 struct visorchipset_bus_info *p = findbus(&bus_info_list, bus_no);
2066 p->bus_driver_context = context;
2069 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
/*
 * visorchipset_get_device_info() - copy the tracked info for (bus_no,
 * dev_no) into *dev_info; the not-found return path is on an elided line.
 */
2072 visorchipset_get_device_info(ulong bus_no, ulong dev_no,
2073 struct visorchipset_device_info *dev_info)
2075 void *p = finddevice(&dev_info_list, bus_no, dev_no);
2079 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2082 EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
/* Attach an opaque bus-driver context pointer to the tracked device. */
2085 visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
2087 struct visorchipset_device_info *p =
2088 finddevice(&dev_info_list, bus_no, dev_no);
2092 p->bus_driver_context = context;
2095 EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2097 /* Generic wrapper function for allocating memory from a kmem_cache pool.
/*
 * Allocates from the pool (flags set on elided lines, presumably keyed on
 * ok_to_block) and bumps the in-use buffer counter on success.
 */
2100 visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
2110 /* __GFP_NORETRY means "ok to fail", meaning
2111 * kmem_cache_alloc() can return NULL, implying the caller CAN
2112 * cope with failure. If you do NOT specify __GFP_NORETRY,
2113 * Linux will go to extreme measures to get memory for you
2114 * (like, invoke oom killer), which will probably cripple the
2117 gfp |= __GFP_NORETRY;
2118 p = kmem_cache_alloc(pool, gfp);
2122 atomic_inc(&Visorchipset_cache_buffers_in_use);
2126 /* Generic wrapper function for freeing memory from a kmem_cache pool.
/* Decrements the in-use counter and returns the buffer to the pool. */
2129 visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2134 atomic_dec(&Visorchipset_cache_buffers_in_use);
2135 kmem_cache_free(pool, p);
/*
 * chipsetready_store() - sysfs store: userspace writes either
 * "CALLHOMEDISK_MOUNTED" or "MODULES_LOADED"; each sets its slot in
 * chipset_events[], which gates the deferred CHIPSET_READY response.
 */
2138 static ssize_t chipsetready_store(struct device *dev,
2139 struct device_attribute *attr, const char *buf, size_t count)
2143 if (sscanf(buf, "%63s", msgtype) != 1)
2146 if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
2147 chipset_events[0] = 1;
2149 } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
2150 chipset_events[1] = 1;
2156 /* The parahotplug/devicedisabled interface gets called by our support script
2157 * when an SR-IOV device has been shut down. The ID is passed to the script
2158 * and then passed back when the device has been removed.
2160 static ssize_t devicedisabled_store(struct device *dev,
2161 struct device_attribute *attr, const char *buf, size_t count)
2165 if (kstrtouint(buf, 10, &id) != 0)
/* active=0: complete the pending disable request with success */
2168 parahotplug_request_complete(id, 0);
2172 /* The parahotplug/deviceenabled interface gets called by our support script
2173 * when an SR-IOV device has been recovered. The ID is passed to the script
2174 * and then passed back when the device has been brought back up.
2176 static ssize_t deviceenabled_store(struct device *dev,
2177 struct device_attribute *attr, const char *buf, size_t count)
2181 if (kstrtouint(buf, 10, &id) != 0)
/* active=1: complete the pending enable request with success */
2184 parahotplug_request_complete(id, 1);
/*
 * visorchipset_init() - module init: create the controlvm channel, set up
 * the char-device interface, the putfile kmem cache, the periodic (or
 * crash-replay) workqueue, and register the platform device.
 * NOTE(review): elided listing — error gotos/labels are not all visible,
 * so the exact unwind ordering cannot be confirmed here.
 */
2189 visorchipset_init(void)
2194 if (!unisys_spar_platform)
/* zero all module-global state before any of it is used */
2197 memset(&BusDev_Server_Notifiers, 0, sizeof(BusDev_Server_Notifiers));
2198 memset(&BusDev_Client_Notifiers, 0, sizeof(BusDev_Client_Notifiers));
2199 memset(&ControlVm_payload_info, 0, sizeof(ControlVm_payload_info));
2200 memset(&LiveDump_info, 0, sizeof(LiveDump_info));
2201 atomic_set(&LiveDump_info.buffers_in_use, 0);
/* testvnic mode is unsupported; bail out with a postcode */
2203 if (visorchipset_testvnic) {
2204 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
/* locate and attach to the controlvm channel provided by the hypervisor */
2209 addr = controlvm_get_channel_address();
2212 visorchannel_create_with_lock
2214 sizeof(struct spar_controlvm_channel_protocol),
2215 spar_controlvm_channel_protocol_uuid);
2216 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2217 visorchannel_get_header(controlvm_channel))) {
2218 initialize_controlvm_payload();
/* channel header failed validation: destroy it and fail init */
2220 visorchannel_destroy(controlvm_channel);
2221 controlvm_channel = NULL;
/* char-device interface for userspace control */
2228 MajorDev = MKDEV(visorchipset_major, 0);
2229 rc = visorchipset_file_init(MajorDev, &controlvm_channel);
2231 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2235 memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
2237 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2239 memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
/* slab cache for putfile buffer entries */
2241 Putfile_buffer_list_pool =
2242 kmem_cache_create(Putfile_buffer_list_pool_name,
2243 sizeof(struct putfile_buffer_entry),
2244 0, SLAB_HWCACHE_ALIGN, NULL);
2245 if (!Putfile_buffer_list_pool) {
2246 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2250 if (!visorchipset_disable_controlvm) {
2251 /* if booting in a crash kernel */
2252 if (visorchipset_crash_kernel)
2253 INIT_DELAYED_WORK(&periodic_controlvm_work,
2254 setup_crash_devices_work_queue);
2256 INIT_DELAYED_WORK(&periodic_controlvm_work,
2257 controlvm_periodic_work);
2258 periodic_controlvm_workqueue =
2259 create_singlethread_workqueue("visorchipset_controlvm");
2261 if (periodic_controlvm_workqueue == NULL) {
2262 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
/* start in fast-poll mode; the worker self-adjusts later */
2267 most_recent_message_jiffies = jiffies;
2268 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2269 rc = queue_delayed_work(periodic_controlvm_workqueue,
2270 &periodic_controlvm_work, poll_jiffies);
2272 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
/* expose /sys/devices/platform/visorchipset for uevents and sysfs */
2279 Visorchipset_platform_device.dev.devt = MajorDev;
2280 if (platform_device_register(&Visorchipset_platform_device) < 0) {
2281 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2285 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
/* common failure exit (label elided): log rc via postcode */
2289 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2290 POSTCODE_SEVERITY_ERR);
/*
 * visorchipset_exit() - module teardown: stop and destroy the periodic
 * workqueue, unmap the controlvm payload, free the putfile cache, clear
 * saved message headers, and destroy the channel and file interface.
 * NOTE(review): elided listing — some braces/else branches not visible.
 */
2296 visorchipset_exit(void)
2298 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
/* if controlvm polling was never enabled there is no workqueue to stop */
2300 if (visorchipset_disable_controlvm) {
2303 cancel_delayed_work(&periodic_controlvm_work);
2304 flush_workqueue(periodic_controlvm_workqueue);
2305 destroy_workqueue(periodic_controlvm_workqueue);
2306 periodic_controlvm_workqueue = NULL;
2307 destroy_controlvm_payload_info(&ControlVm_payload_info);
2309 Test_Vnic_channel = NULL;
2310 if (Putfile_buffer_list_pool) {
2311 kmem_cache_destroy(Putfile_buffer_list_pool);
2312 Putfile_buffer_list_pool = NULL;
2315 cleanup_controlvm_structures();
2317 memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
2319 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2321 memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
2323 visorchannel_destroy(controlvm_channel);
2325 visorchipset_file_cleanup();
2326 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
/* ---- module parameters and module metadata ---- */
2329 module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2330 MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2331 int visorchipset_testvnic = 0;
2333 module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2334 MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2335 int visorchipset_testvnicclient = 0;
2337 module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2338 MODULE_PARM_DESC(visorchipset_testmsg,
2339 "1 to manufacture the chipset, bus, and switch messages");
2340 int visorchipset_testmsg = 0;
2342 module_param_named(major, visorchipset_major, int, S_IRUGO);
2343 MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
2344 int visorchipset_major = 0;
2346 module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
/* NOTE(review): "serverreqwait" below looks like a typo for
 * "serverregwait" — the description is attached to the wrong name */
2347 MODULE_PARM_DESC(visorchipset_serverreqwait,
2348 "1 to have the module wait for the visor bus to register");
2349 int visorchipset_serverregwait = 0; /* default is off */
2350 module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2351 MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2352 int visorchipset_clientregwait = 1; /* default is on */
2353 module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2354 MODULE_PARM_DESC(visorchipset_testteardown,
2355 "1 to test teardown of the chipset, bus, and switch");
2356 int visorchipset_testteardown = 0; /* default is off */
2357 module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2359 MODULE_PARM_DESC(visorchipset_disable_controlvm,
2360 "1 to disable polling of controlVm channel");
2361 int visorchipset_disable_controlvm = 0; /* default is off */
2362 module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
2363 MODULE_PARM_DESC(visorchipset_crash_kernel,
2364 "1 means we are running in crash kernel");
2365 int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */
2366 module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2368 MODULE_PARM_DESC(visorchipset_holdchipsetready,
2369 "1 to hold response to CHIPSET_READY");
2370 int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
2371 * response immediately */
2372 module_init(visorchipset_init);
2373 module_exit(visorchipset_exit);
2375 MODULE_AUTHOR("Unisys");
2376 MODULE_LICENSE("GPL");
2377 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2379 MODULE_VERSION(VERSION);