3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
19 #include "visorchipset.h"
20 #include "procobjecttree.h"
21 #include "visorchannel.h"
22 #include "periodic_work.h"
27 #include "controlvmcompletionstatus.h"
28 #include "guestlinuxdebug.h"
30 #include <linux/nls.h>
31 #include <linux/netdevice.h>
32 #include <linux/platform_device.h>
33 #include <linux/uuid.h>
35 #define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
36 #define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
37 * vnic loopback test */
38 #define TEST_VNIC_SWITCHNO 1
39 #define TEST_VNIC_BUSNO 9
41 #define MAX_NAME_SIZE 128
42 #define MAX_IP_SIZE 50
43 #define MAXOUTSTANDINGCHANNELCOMMAND 256
44 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
45 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
47 /* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
48 * we switch to slow polling mode. As soon as we get a controlvm
49 * message, we switch back to fast polling mode.
 */
51 #define MIN_IDLE_SECONDS 10
52 static ulong Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
53 static ulong Most_recent_message_jiffies; /* when we got our last
54 * controlvm message */
63 static int serverregistered;
64 static int clientregistered;
66 #define MAX_CHIPSET_EVENTS 2
67 static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
69 static struct delayed_work Periodic_controlvm_work;
70 static struct workqueue_struct *Periodic_controlvm_workqueue;
71 static DEFINE_SEMAPHORE(NotifierLock);
74 struct controlvm_message message;
78 static struct controlvm_message_header g_DiagMsgHdr;
79 static struct controlvm_message_header g_ChipSetMsgHdr;
80 static struct controlvm_message_header g_DelDumpMsgHdr;
81 static const uuid_le UltraDiagPoolChannelProtocolGuid =
82 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
83 /* 0xffffff is an invalid Bus/Device number */
84 static ulong g_diagpoolBusNo = 0xffffff;
85 static ulong g_diagpoolDevNo = 0xffffff;
86 static struct controlvm_message_packet g_DeviceChangeStatePacket;
88 /* Only VNIC and VHBA channels are sent to visorclientbus (aka
 * "visorhackbus"); all other channel types go to visorbus.
 */
91 #define FOR_VISORHACKBUS(channel_type_guid) \
92 (((uuid_le_cmp(channel_type_guid,\
93 spar_vnic_channel_protocol_uuid) == 0)\
94 || (uuid_le_cmp(channel_type_guid,\
95 spar_vhba_channel_protocol_uuid) == 0)))
96 #define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
98 #define is_diagpool_channel(channel_type_guid) \
99 (uuid_le_cmp(channel_type_guid, UltraDiagPoolChannelProtocolGuid) == 0)
101 static LIST_HEAD(BusInfoList);
102 static LIST_HEAD(DevInfoList);
104 static struct visorchannel *ControlVm_channel;
107 u8 __iomem *ptr; /* pointer to base address of payload pool */
108 u64 offset; /* offset from beginning of controlvm
109 * channel to beginning of payload * pool */
110 u32 bytes; /* number of bytes in payload pool */
111 } CONTROLVM_PAYLOAD_INFO;
113 /* Manages the request payload in the controlvm channel */
114 static CONTROLVM_PAYLOAD_INFO ControlVm_payload_info;
116 static struct channel_header *Test_Vnic_channel;
119 struct controlvm_message_header Dumpcapture_header;
120 struct controlvm_message_header Gettextdump_header;
121 struct controlvm_message_header Dumpcomplete_header;
122 BOOL Gettextdump_outstanding;
125 atomic_t buffers_in_use;
128 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
129 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
 */
131 static LIVEDUMP_INFO LiveDump_info;
133 /* The following globals are used to handle the scenario where we are unable to
134 * offload the payload from a controlvm message due to memory requirements. In
135 * this scenario, we simply stash the controlvm message, then attempt to
136 * process it again the next time controlvm_periodic_work() runs.
 */
138 static struct controlvm_message ControlVm_Pending_Msg;
139 static BOOL ControlVm_Pending_Msg_Valid = FALSE;
141 /* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
142 * TRANSMIT_FILE PutFile payloads.
 */
144 static struct kmem_cache *Putfile_buffer_list_pool;
145 static const char Putfile_buffer_list_pool_name[] =
146 "controlvm_putfile_buffer_list_pool";
148 /* This identifies a data buffer that has been received via a controlvm messages
149 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
151 struct putfile_buffer_entry {
152 struct list_head next; /* putfile_buffer_entry list */
153 PARSER_CONTEXT *parser_ctx; /* points to buffer containing input data */
156 /* List of struct putfile_request *, via next_putfile_request member.
157 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
160 static LIST_HEAD(Putfile_request_list);
162 /* This describes a buffer and its current state of transfer (e.g., how many
163 * bytes have already been supplied as putfile data, and how many bytes are
164 * remaining) for a putfile_request.
 */
166 struct putfile_active_buffer {
167 /* a payload from a controlvm message, containing a file data buffer */
168 PARSER_CONTEXT *parser_ctx;
169 /* points within data area of parser_ctx to next byte of data */
171 /* # bytes left from <pnext> to the end of this data buffer */
172 size_t bytes_remaining;
175 #define PUTFILE_REQUEST_SIG 0x0906101302281211
176 /* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
177 * conversation. Structs of this type are dynamically linked into
178 * <Putfile_request_list>.
 */
180 struct putfile_request {
181 u64 sig; /* PUTFILE_REQUEST_SIG */
183 /* header from original TransmitFile request */
184 struct controlvm_message_header controlvm_header;
185 u64 file_request_number; /* from original TransmitFile request */
187 /* link to next struct putfile_request */
188 struct list_head next_putfile_request;
190 /* most-recent sequence number supplied via a controlvm message */
191 u64 data_sequence_number;
193 /* head of putfile_buffer_entry list, which describes the data to be
194 * supplied as putfile data;
195 * - this list is added to when controlvm messages come in that supply
197 * - this list is removed from via the hotplug program that is actually
198 * consuming these buffers to write as file data */
199 struct list_head input_buffer_list;
200 spinlock_t req_list_lock; /* lock for input_buffer_list */
202 /* waiters for input_buffer_list to go non-empty */
203 wait_queue_head_t input_buffer_wq;
205 /* data not yet read within current putfile_buffer_entry */
206 struct putfile_active_buffer active_buf;
208 /* <0 = failed, 0 = in-progress, >0 = successful; */
209 /* note that this must be set with req_list_lock, and if you set <0, */
210 /* it is your responsibility to also free up all of the other objects */
211 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
212 /* before releasing the lock */
213 int completion_status;
216 static atomic_t Visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
218 struct parahotplug_request {
219 struct list_head list;
221 unsigned long expiration;
222 struct controlvm_message msg;
225 static LIST_HEAD(Parahotplug_request_list);
226 static DEFINE_SPINLOCK(Parahotplug_request_list_lock); /* lock for above */
227 static void parahotplug_process_list(void);
229 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
230 * CONTROLVM_REPORTEVENT.
 */
232 static struct visorchipset_busdev_notifiers BusDev_Server_Notifiers;
233 static struct visorchipset_busdev_notifiers BusDev_Client_Notifiers;
235 static void bus_create_response(ulong busNo, int response);
236 static void bus_destroy_response(ulong busNo, int response);
237 static void device_create_response(ulong busNo, ulong devNo, int response);
238 static void device_destroy_response(ulong busNo, ulong devNo, int response);
239 static void device_resume_response(ulong busNo, ulong devNo, int response);
241 static struct visorchipset_busdev_responders BusDev_Responders = {
242 .bus_create = bus_create_response,
243 .bus_destroy = bus_destroy_response,
244 .device_create = device_create_response,
245 .device_destroy = device_destroy_response,
246 .device_pause = visorchipset_device_pause_response,
247 .device_resume = device_resume_response,
250 /* info for /dev/visorchipset */
251 static dev_t MajorDev = -1; /**< indicates major num for device */
253 /* prototypes for attributes */
254 static ssize_t toolaction_show(struct device *dev,
255 struct device_attribute *attr, char *buf);
256 static ssize_t toolaction_store(struct device *dev,
257 struct device_attribute *attr, const char *buf, size_t count);
258 static DEVICE_ATTR_RW(toolaction);
260 static ssize_t boottotool_show(struct device *dev,
261 struct device_attribute *attr, char *buf);
262 static ssize_t boottotool_store(struct device *dev,
263 struct device_attribute *attr, const char *buf, size_t count);
264 static DEVICE_ATTR_RW(boottotool);
266 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
268 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
269 const char *buf, size_t count);
270 static DEVICE_ATTR_RW(error);
272 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
274 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
275 const char *buf, size_t count);
276 static DEVICE_ATTR_RW(textid);
278 static ssize_t remaining_steps_show(struct device *dev,
279 struct device_attribute *attr, char *buf);
280 static ssize_t remaining_steps_store(struct device *dev,
281 struct device_attribute *attr, const char *buf, size_t count);
282 static DEVICE_ATTR_RW(remaining_steps);
284 static ssize_t chipsetready_store(struct device *dev,
285 struct device_attribute *attr, const char *buf, size_t count);
286 static DEVICE_ATTR_WO(chipsetready);
288 static ssize_t devicedisabled_store(struct device *dev,
289 struct device_attribute *attr, const char *buf, size_t count);
290 static DEVICE_ATTR_WO(devicedisabled);
292 static ssize_t deviceenabled_store(struct device *dev,
293 struct device_attribute *attr, const char *buf, size_t count);
294 static DEVICE_ATTR_WO(deviceenabled);
296 static struct attribute *visorchipset_install_attrs[] = {
297 &dev_attr_toolaction.attr,
298 &dev_attr_boottotool.attr,
299 &dev_attr_error.attr,
300 &dev_attr_textid.attr,
301 &dev_attr_remaining_steps.attr,
305 static struct attribute_group visorchipset_install_group = {
307 .attrs = visorchipset_install_attrs
310 static struct attribute *visorchipset_guest_attrs[] = {
311 &dev_attr_chipsetready.attr,
315 static struct attribute_group visorchipset_guest_group = {
317 .attrs = visorchipset_guest_attrs
320 static struct attribute *visorchipset_parahotplug_attrs[] = {
321 &dev_attr_devicedisabled.attr,
322 &dev_attr_deviceenabled.attr,
326 static struct attribute_group visorchipset_parahotplug_group = {
327 .name = "parahotplug",
328 .attrs = visorchipset_parahotplug_attrs
331 static const struct attribute_group *visorchipset_dev_groups[] = {
332 &visorchipset_install_group,
333 &visorchipset_guest_group,
334 &visorchipset_parahotplug_group,
338 /* /sys/devices/platform/visorchipset */
339 static struct platform_device Visorchipset_platform_device = {
340 .name = "visorchipset",
342 .dev.groups = visorchipset_dev_groups,
345 /* Function prototypes */
346 static void controlvm_respond(struct controlvm_message_header *msgHdr,
348 static void controlvm_respond_chipset_init(
349 struct controlvm_message_header *msgHdr, int response,
350 enum ultra_chipset_feature features);
351 static void controlvm_respond_physdev_changestate(
352 struct controlvm_message_header *msgHdr, int response,
353 struct spar_segment_state state);
355 static ssize_t toolaction_show(struct device *dev,
356 struct device_attribute *attr,
361 visorchannel_read(ControlVm_channel,
362 offsetof(struct spar_controlvm_channel_protocol,
363 tool_action), &toolAction, sizeof(u8));
364 return scnprintf(buf, PAGE_SIZE, "%u\n", toolAction);
367 static ssize_t toolaction_store(struct device *dev,
368 struct device_attribute *attr,
369 const char *buf, size_t count)
374 if (kstrtou8(buf, 10, &toolAction) != 0)
377 ret = visorchannel_write(ControlVm_channel,
378 offsetof(struct spar_controlvm_channel_protocol, tool_action),
379 &toolAction, sizeof(u8));
386 static ssize_t boottotool_show(struct device *dev,
387 struct device_attribute *attr,
390 struct efi_spar_indication efiSparIndication;
392 visorchannel_read(ControlVm_channel,
393 offsetof(struct spar_controlvm_channel_protocol,
394 efi_spar_ind), &efiSparIndication,
395 sizeof(struct efi_spar_indication));
396 return scnprintf(buf, PAGE_SIZE, "%u\n",
397 efiSparIndication.boot_to_tool);
400 static ssize_t boottotool_store(struct device *dev,
401 struct device_attribute *attr,
402 const char *buf, size_t count)
405 struct efi_spar_indication efiSparIndication;
407 if (kstrtoint(buf, 10, &val) != 0)
410 efiSparIndication.boot_to_tool = val;
411 ret = visorchannel_write(ControlVm_channel,
412 offsetof(struct spar_controlvm_channel_protocol,
414 &(efiSparIndication),
415 sizeof(struct efi_spar_indication));
422 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
427 visorchannel_read(ControlVm_channel, offsetof(
428 struct spar_controlvm_channel_protocol, installation_error),
429 &error, sizeof(u32));
430 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
433 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
434 const char *buf, size_t count)
439 if (kstrtou32(buf, 10, &error) != 0)
442 ret = visorchannel_write(ControlVm_channel,
443 offsetof(struct spar_controlvm_channel_protocol,
445 &error, sizeof(u32));
451 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
456 visorchannel_read(ControlVm_channel, offsetof(
457 struct spar_controlvm_channel_protocol, installation_text_id),
458 &textId, sizeof(u32));
459 return scnprintf(buf, PAGE_SIZE, "%i\n", textId);
462 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
463 const char *buf, size_t count)
468 if (kstrtou32(buf, 10, &textId) != 0)
471 ret = visorchannel_write(ControlVm_channel,
472 offsetof(struct spar_controlvm_channel_protocol,
473 installation_text_id),
474 &textId, sizeof(u32));
481 static ssize_t remaining_steps_show(struct device *dev,
482 struct device_attribute *attr, char *buf)
486 visorchannel_read(ControlVm_channel,
487 offsetof(struct spar_controlvm_channel_protocol,
488 installation_remaining_steps),
491 return scnprintf(buf, PAGE_SIZE, "%hu\n", remainingSteps);
494 static ssize_t remaining_steps_store(struct device *dev,
495 struct device_attribute *attr, const char *buf, size_t count)
500 if (kstrtou16(buf, 10, &remainingSteps) != 0)
503 ret = visorchannel_write(ControlVm_channel,
504 offsetof(struct spar_controlvm_channel_protocol,
505 installation_remaining_steps),
506 &remainingSteps, sizeof(u16));
516 wchar_t unicodeString[] = { 'a', 'b', 'c', 0 };
517 char s[sizeof(unicodeString) * NLS_MAX_CHARSET_SIZE];
518 wchar_t unicode2[99];
520 /* NOTE: Either due to a bug, or feature I don't understand, the
521 * kernel utf8_mbstowcs() and utf_wcstombs() do NOT copy the
522 * trailed NUL byte!! REALLY!!!!! Arrrrgggghhhhh
525 LOGINF("sizeof(wchar_t) = %d", sizeof(wchar_t));
526 LOGINF("utf8_wcstombs=%d",
527 chrs = utf8_wcstombs(s, unicodeString, sizeof(s)));
529 s[chrs] = '\0'; /* GRRRRRRRR */
531 LOGINF("utf8_mbstowcs=%d", chrs = utf8_mbstowcs(unicode2, s, 100));
533 unicode2[chrs] = 0; /* GRRRRRRRR */
534 if (memcmp(unicodeString, unicode2, sizeof(unicodeString)) == 0)
535 LOGINF("strings match... good");
537 LOGINF("strings did not match!!");
542 busInfo_clear(void *v)
544 struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);
546 if (p->proc_object) {
547 visor_proc_DestroyObject(p->proc_object);
548 p->proc_object = NULL;
553 kfree(p->description);
554 p->description = NULL;
556 p->state.created = 0;
557 memset(p, 0, sizeof(struct visorchipset_bus_info));
561 devInfo_clear(void *v)
563 struct visorchipset_device_info *p =
564 (struct visorchipset_device_info *)(v);
566 p->state.created = 0;
567 memset(p, 0, sizeof(struct visorchipset_device_info));
571 check_chipset_events(void)
575 /* Check events to determine if response should be sent */
576 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
577 send_msg &= chipset_events[i];
582 clear_chipset_events(void)
585 /* Clear chipset_events */
586 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
587 chipset_events[i] = 0;
591 visorchipset_register_busdev_server(
592 struct visorchipset_busdev_notifiers *notifiers,
593 struct visorchipset_busdev_responders *responders,
594 struct ultra_vbus_deviceinfo *driver_info)
597 if (notifiers == NULL) {
598 memset(&BusDev_Server_Notifiers, 0,
599 sizeof(BusDev_Server_Notifiers));
600 serverregistered = 0; /* clear flag */
602 BusDev_Server_Notifiers = *notifiers;
603 serverregistered = 1; /* set flag */
606 *responders = BusDev_Responders;
608 bus_device_info_init(driver_info, "chipset", "visorchipset",
613 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
616 visorchipset_register_busdev_client(
617 struct visorchipset_busdev_notifiers *notifiers,
618 struct visorchipset_busdev_responders *responders,
619 struct ultra_vbus_deviceinfo *driver_info)
622 if (notifiers == NULL) {
623 memset(&BusDev_Client_Notifiers, 0,
624 sizeof(BusDev_Client_Notifiers));
625 clientregistered = 0; /* clear flag */
627 BusDev_Client_Notifiers = *notifiers;
628 clientregistered = 1; /* set flag */
631 *responders = BusDev_Responders;
633 bus_device_info_init(driver_info, "chipset(bolts)",
634 "visorchipset", VERSION, NULL);
637 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
640 cleanup_controlvm_structures(void)
642 struct visorchipset_bus_info *bi, *tmp_bi;
643 struct visorchipset_device_info *di, *tmp_di;
645 list_for_each_entry_safe(bi, tmp_bi, &BusInfoList, entry) {
647 list_del(&bi->entry);
651 list_for_each_entry_safe(di, tmp_di, &DevInfoList, entry) {
653 list_del(&di->entry);
659 chipset_init(struct controlvm_message *inmsg)
661 static int chipset_inited;
662 enum ultra_chipset_feature features = 0;
663 int rc = CONTROLVM_RESP_SUCCESS;
665 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
666 if (chipset_inited) {
667 LOGERR("CONTROLVM_CHIPSET_INIT Failed: Already Done.");
668 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
672 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
674 /* Set features to indicate we support parahotplug (if Command
675 * also supports it). */
677 inmsg->cmd.init_chipset.
678 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
680 /* Set the "reply" bit so Command knows this is a
681 * features-aware driver. */
682 features |= ULTRA_CHIPSET_FEATURE_REPLY;
686 cleanup_controlvm_structures();
687 if (inmsg->hdr.flags.response_expected)
688 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
692 controlvm_init_response(struct controlvm_message *msg,
693 struct controlvm_message_header *msgHdr, int response)
695 memset(msg, 0, sizeof(struct controlvm_message));
696 memcpy(&msg->hdr, msgHdr, sizeof(struct controlvm_message_header));
697 msg->hdr.payload_bytes = 0;
698 msg->hdr.payload_vm_offset = 0;
699 msg->hdr.payload_max_bytes = 0;
701 msg->hdr.flags.failed = 1;
702 msg->hdr.completion_status = (u32) (-response);
707 controlvm_respond(struct controlvm_message_header *msgHdr, int response)
709 struct controlvm_message outmsg;
711 controlvm_init_response(&outmsg, msgHdr, response);
712 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
713 * back the deviceChangeState structure in the packet. */
714 if (msgHdr->id == CONTROLVM_DEVICE_CHANGESTATE
715 && g_DeviceChangeStatePacket.device_change_state.bus_no ==
717 && g_DeviceChangeStatePacket.device_change_state.dev_no ==
719 outmsg.cmd = g_DeviceChangeStatePacket;
720 if (outmsg.hdr.flags.test_message == 1) {
721 LOGINF("%s controlvm_msg=0x%x response=%d for test message",
722 __func__, outmsg.hdr.id, response);
725 if (!visorchannel_signalinsert(ControlVm_channel,
726 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
727 LOGERR("signalinsert failed!");
733 controlvm_respond_chipset_init(struct controlvm_message_header *msgHdr,
735 enum ultra_chipset_feature features)
737 struct controlvm_message outmsg;
739 controlvm_init_response(&outmsg, msgHdr, response);
740 outmsg.cmd.init_chipset.features = features;
741 if (!visorchannel_signalinsert(ControlVm_channel,
742 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
743 LOGERR("signalinsert failed!");
748 static void controlvm_respond_physdev_changestate(
749 struct controlvm_message_header *msgHdr, int response,
750 struct spar_segment_state state)
752 struct controlvm_message outmsg;
754 controlvm_init_response(&outmsg, msgHdr, response);
755 outmsg.cmd.device_change_state.state = state;
756 outmsg.cmd.device_change_state.flags.phys_device = 1;
757 if (!visorchannel_signalinsert(ControlVm_channel,
758 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
759 LOGERR("signalinsert failed!");
765 visorchipset_save_message(struct controlvm_message *msg,
766 enum crash_obj_type type)
768 u32 localSavedCrashMsgOffset;
769 u16 localSavedCrashMsgCount;
771 /* get saved message count */
772 if (visorchannel_read(ControlVm_channel,
773 offsetof(struct spar_controlvm_channel_protocol,
774 saved_crash_message_count),
775 &localSavedCrashMsgCount, sizeof(u16)) < 0) {
776 LOGERR("failed to get Saved Message Count");
777 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
778 POSTCODE_SEVERITY_ERR);
782 if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
783 LOGERR("Saved Message Count incorrect %d",
784 localSavedCrashMsgCount);
785 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
786 localSavedCrashMsgCount,
787 POSTCODE_SEVERITY_ERR);
791 /* get saved crash message offset */
792 if (visorchannel_read(ControlVm_channel,
793 offsetof(struct spar_controlvm_channel_protocol,
794 saved_crash_message_offset),
795 &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
796 LOGERR("failed to get Saved Message Offset");
797 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
798 POSTCODE_SEVERITY_ERR);
802 if (type == CRASH_BUS) {
803 if (visorchannel_write(ControlVm_channel,
804 localSavedCrashMsgOffset,
806 sizeof(struct controlvm_message)) < 0) {
807 LOGERR("SAVE_MSG_BUS_FAILURE: Failed to write CrashCreateBusMsg!");
808 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
809 POSTCODE_SEVERITY_ERR);
813 if (visorchannel_write(ControlVm_channel,
814 localSavedCrashMsgOffset +
815 sizeof(struct controlvm_message), msg,
816 sizeof(struct controlvm_message)) < 0) {
817 LOGERR("SAVE_MSG_DEV_FAILURE: Failed to write CrashCreateDevMsg!");
818 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
819 POSTCODE_SEVERITY_ERR);
824 EXPORT_SYMBOL_GPL(visorchipset_save_message);
827 bus_responder(enum controlvm_id cmdId, ulong busNo, int response)
829 struct visorchipset_bus_info *p = NULL;
830 BOOL need_clear = FALSE;
832 p = findbus(&BusInfoList, busNo);
834 LOGERR("internal error busNo=%lu", busNo);
838 if ((cmdId == CONTROLVM_BUS_CREATE) &&
839 (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
840 /* undo the row we just created... */
841 delbusdevices(&DevInfoList, busNo);
843 if (cmdId == CONTROLVM_BUS_CREATE)
844 p->state.created = 1;
845 if (cmdId == CONTROLVM_BUS_DESTROY)
849 if (p->pending_msg_hdr.id == CONTROLVM_INVALID) {
850 LOGERR("bus_responder no pending msg");
851 return; /* no controlvm response needed */
853 if (p->pending_msg_hdr.id != (u32) cmdId) {
854 LOGERR("expected=%d, found=%d", cmdId, p->pending_msg_hdr.id);
857 controlvm_respond(&p->pending_msg_hdr, response);
858 p->pending_msg_hdr.id = CONTROLVM_INVALID;
861 delbusdevices(&DevInfoList, busNo);
866 device_changestate_responder(enum controlvm_id cmdId,
867 ulong busNo, ulong devNo, int response,
868 struct spar_segment_state responseState)
870 struct visorchipset_device_info *p = NULL;
871 struct controlvm_message outmsg;
873 p = finddevice(&DevInfoList, busNo, devNo);
875 LOGERR("internal error; busNo=%lu, devNo=%lu", busNo, devNo);
878 if (p->pending_msg_hdr.id == CONTROLVM_INVALID) {
879 LOGERR("device_responder no pending msg");
880 return; /* no controlvm response needed */
882 if (p->pending_msg_hdr.id != cmdId) {
883 LOGERR("expected=%d, found=%d", cmdId, p->pending_msg_hdr.id);
887 controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
889 outmsg.cmd.device_change_state.bus_no = busNo;
890 outmsg.cmd.device_change_state.dev_no = devNo;
891 outmsg.cmd.device_change_state.state = responseState;
893 if (!visorchannel_signalinsert(ControlVm_channel,
894 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
895 LOGERR("signalinsert failed!");
899 p->pending_msg_hdr.id = CONTROLVM_INVALID;
903 device_responder(enum controlvm_id cmdId, ulong busNo, ulong devNo,
906 struct visorchipset_device_info *p = NULL;
907 BOOL need_clear = FALSE;
909 p = finddevice(&DevInfoList, busNo, devNo);
911 LOGERR("internal error; busNo=%lu, devNo=%lu", busNo, devNo);
915 if (cmdId == CONTROLVM_DEVICE_CREATE)
916 p->state.created = 1;
917 if (cmdId == CONTROLVM_DEVICE_DESTROY)
921 if (p->pending_msg_hdr.id == CONTROLVM_INVALID) {
922 LOGERR("device_responder no pending msg");
923 return; /* no controlvm response needed */
925 if (p->pending_msg_hdr.id != (u32) cmdId) {
926 LOGERR("expected=%d, found=%d", cmdId, p->pending_msg_hdr.id);
929 controlvm_respond(&p->pending_msg_hdr, response);
930 p->pending_msg_hdr.id = CONTROLVM_INVALID;
936 bus_epilog(u32 busNo,
937 u32 cmd, struct controlvm_message_header *msgHdr,
938 int response, BOOL needResponse)
940 BOOL notified = FALSE;
942 struct visorchipset_bus_info *pBusInfo = findbus(&BusInfoList, busNo);
945 LOGERR("HUH? bad busNo=%d", busNo);
949 memcpy(&pBusInfo->pending_msg_hdr, msgHdr,
950 sizeof(struct controlvm_message_header));
952 pBusInfo->pending_msg_hdr.id = CONTROLVM_INVALID;
955 if (response == CONTROLVM_RESP_SUCCESS) {
957 case CONTROLVM_BUS_CREATE:
958 /* We can't tell from the bus_create
959 * information which of our 2 bus flavors the
960 * devices on this bus will ultimately end up.
961 * FORTUNATELY, it turns out it is harmless to
962 * send the bus_create to both of them. We can
963 * narrow things down a little bit, though,
964 * because we know: - BusDev_Server can handle
965 * either server or client devices
966 * - BusDev_Client can handle ONLY client
968 if (BusDev_Server_Notifiers.bus_create) {
969 (*BusDev_Server_Notifiers.bus_create) (busNo);
972 if ((!pBusInfo->flags.server) /*client */ &&
973 BusDev_Client_Notifiers.bus_create) {
974 (*BusDev_Client_Notifiers.bus_create) (busNo);
978 case CONTROLVM_BUS_DESTROY:
979 if (BusDev_Server_Notifiers.bus_destroy) {
980 (*BusDev_Server_Notifiers.bus_destroy) (busNo);
983 if ((!pBusInfo->flags.server) /*client */ &&
984 BusDev_Client_Notifiers.bus_destroy) {
985 (*BusDev_Client_Notifiers.bus_destroy) (busNo);
992 /* The callback function just called above is responsible
993 * for calling the appropriate visorchipset_busdev_responders
994 * function, which will call bus_responder()
998 bus_responder(cmd, busNo, response);
1003 device_epilog(u32 busNo, u32 devNo, struct spar_segment_state state, u32 cmd,
1004 struct controlvm_message_header *msgHdr, int response,
1005 BOOL needResponse, BOOL for_visorbus)
1007 struct visorchipset_busdev_notifiers *notifiers = NULL;
1008 BOOL notified = FALSE;
1010 struct visorchipset_device_info *pDevInfo =
1011 finddevice(&DevInfoList, busNo, devNo);
1013 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
1018 LOGERR("HUH? bad busNo=%d, devNo=%d", busNo, devNo);
1022 notifiers = &BusDev_Server_Notifiers;
1024 notifiers = &BusDev_Client_Notifiers;
1026 memcpy(&pDevInfo->pending_msg_hdr, msgHdr,
1027 sizeof(struct controlvm_message_header));
1029 pDevInfo->pending_msg_hdr.id = CONTROLVM_INVALID;
1031 down(&NotifierLock);
1032 if (response >= 0) {
1034 case CONTROLVM_DEVICE_CREATE:
1035 if (notifiers->device_create) {
1036 (*notifiers->device_create) (busNo, devNo);
1040 case CONTROLVM_DEVICE_CHANGESTATE:
1041 /* ServerReady / ServerRunning / SegmentStateRunning */
1042 if (state.alive == segment_state_running.alive &&
1044 segment_state_running.operating) {
1045 if (notifiers->device_resume) {
1046 (*notifiers->device_resume) (busNo,
1051 /* ServerNotReady / ServerLost / SegmentStateStandby */
1052 else if (state.alive == segment_state_standby.alive &&
1054 segment_state_standby.operating) {
1055 /* technically this is standby case
1056 * where server is lost
1058 if (notifiers->device_pause) {
1059 (*notifiers->device_pause) (busNo,
1063 } else if (state.alive == segment_state_paused.alive &&
1065 segment_state_paused.operating) {
1066 /* this is lite pause where channel is
1067 * still valid just 'pause' of it
1069 if (busNo == g_diagpoolBusNo
1070 && devNo == g_diagpoolDevNo) {
1071 LOGINF("DEVICE_CHANGESTATE(DiagpoolChannel busNo=%d devNo=%d is pausing...)",
1073 /* this will trigger the
1074 * diag_shutdown.sh script in
1075 * the visorchipset hotplug */
1077 (&Visorchipset_platform_device.dev.
1078 kobj, KOBJ_ONLINE, envp);
1082 case CONTROLVM_DEVICE_DESTROY:
1083 if (notifiers->device_destroy) {
1084 (*notifiers->device_destroy) (busNo, devNo);
1091 /* The callback function just called above is responsible
1092 * for calling the appropriate visorchipset_busdev_responders
1093 * function, which will call device_responder()
1097 device_responder(cmd, busNo, devNo, response);
1102 bus_create(struct controlvm_message *inmsg)
1104 struct controlvm_message_packet *cmd = &inmsg->cmd;
1105 ulong busNo = cmd->create_bus.bus_no;
1106 int rc = CONTROLVM_RESP_SUCCESS;
1107 struct visorchipset_bus_info *pBusInfo = NULL;
1110 pBusInfo = findbus(&BusInfoList, busNo);
1111 if (pBusInfo && (pBusInfo->state.created == 1)) {
1112 LOGERR("CONTROLVM_BUS_CREATE Failed: bus %lu already exists",
1114 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1115 POSTCODE_SEVERITY_ERR);
1116 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1119 pBusInfo = kzalloc(sizeof(struct visorchipset_bus_info), GFP_KERNEL);
1120 if (pBusInfo == NULL) {
1121 LOGERR("CONTROLVM_BUS_CREATE Failed: bus %lu kzalloc failed",
1123 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1124 POSTCODE_SEVERITY_ERR);
1125 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1129 INIT_LIST_HEAD(&pBusInfo->entry);
1130 pBusInfo->bus_no = busNo;
1131 pBusInfo->dev_no = cmd->create_bus.dev_count;
1133 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1135 if (inmsg->hdr.flags.test_message == 1)
1136 pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1138 pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1140 pBusInfo->flags.server = inmsg->hdr.flags.server;
1141 pBusInfo->chan_info.channel_addr = cmd->create_bus.channel_addr;
1142 pBusInfo->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1143 pBusInfo->chan_info.channel_type_uuid =
1144 cmd->create_bus.bus_data_type_uuid;
1145 pBusInfo->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
1147 list_add(&pBusInfo->entry, &BusInfoList);
1149 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1152 bus_epilog(busNo, CONTROLVM_BUS_CREATE, &inmsg->hdr,
1153 rc, inmsg->hdr.flags.response_expected == 1);
/* Handle a CONTROLVM_BUS_DESTROY message: verify the bus exists and has
 * actually been created, then report the outcome via bus_epilog().
 * A negative rc value carries the CONTROLVM error code to the responder.
 */
1157 bus_destroy(struct controlvm_message *inmsg)
1159 struct controlvm_message_packet *cmd = &inmsg->cmd;
1160 ulong busNo = cmd->destroy_bus.bus_no;
1161 struct visorchipset_bus_info *pBusInfo;
1162 int rc = CONTROLVM_RESP_SUCCESS;
1164 pBusInfo = findbus(&BusInfoList, busNo);
/* bus number not found in our tracked list */
1166 LOGERR("CONTROLVM_BUS_DESTROY Failed: bus %lu invalid", busNo);
1167 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
/* bus known but never created (or already torn down) */
1170 if (pBusInfo->state.created == 0) {
1171 LOGERR("CONTROLVM_BUS_DESTROY Failed: bus %lu already destroyed",
1173 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
/* always respond (success or error) if the sender expects one */
1178 bus_epilog(busNo, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
1179 rc, inmsg->hdr.flags.response_expected == 1);
/* Handle a CONTROLVM_BUS_CONFIGURE message: validate the target bus
 * (exists, created, no message already outstanding), then record the
 * guest partition handle/uuid and the parsed bus name, and respond via
 * bus_epilog().
 */
1183 bus_configure(struct controlvm_message *inmsg, PARSER_CONTEXT *parser_ctx)
1185 struct controlvm_message_packet *cmd = &inmsg->cmd;
1186 ulong busNo = cmd->configure_bus.bus_no;
1187 struct visorchipset_bus_info *pBusInfo = NULL;
1188 int rc = CONTROLVM_RESP_SUCCESS;
1191 busNo = cmd->configure_bus.bus_no;
1192 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1194 pBusInfo = findbus(&BusInfoList, busNo);
/* unknown bus number */
1196 LOGERR("CONTROLVM_BUS_CONFIGURE Failed: bus %lu invalid",
1198 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1199 POSTCODE_SEVERITY_ERR);
1200 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
/* bus exists in our list but CREATE has not completed */
1203 if (pBusInfo->state.created == 0) {
1204 LOGERR("CONTROLVM_BUS_CONFIGURE Failed: Invalid bus %lu - not created yet",
1206 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1207 POSTCODE_SEVERITY_ERR);
1208 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1211 /* TBD - add this check to other commands also... */
/* refuse to configure while a previous controlvm message on this bus
 * is still awaiting its response */
1212 if (pBusInfo->pending_msg_hdr.id != CONTROLVM_INVALID) {
1213 LOGERR("CONTROLVM_BUS_CONFIGURE Failed: bus %lu MsgId=%u outstanding",
1214 busNo, (uint) pBusInfo->pending_msg_hdr.id);
1215 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1216 POSTCODE_SEVERITY_ERR);
1217 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
/* record guest partition identity and the NAME string carried in the
 * message payload (via the parser context) */
1221 pBusInfo->partition_handle = cmd->configure_bus.guest_handle;
1222 pBusInfo->partition_uuid = parser_id_get(parser_ctx);
1223 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1224 pBusInfo->name = parser_string_get(parser_ctx);
1226 visorchannel_uuid_id(&pBusInfo->partition_uuid, s);
1227 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1229 bus_epilog(busNo, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
1230 rc, inmsg->hdr.flags.response_expected == 1);
/* Handle a CONTROLVM_DEVICE_CREATE message: reject duplicates and
 * devices on unknown/uncreated buses, then allocate and populate a
 * visorchipset_device_info entry (channel address/size/type), add it to
 * DevInfoList, remember the DiagPool device coordinates if applicable,
 * and finish via device_epilog().
 */
1234 my_device_create(struct controlvm_message *inmsg)
1236 struct controlvm_message_packet *cmd = &inmsg->cmd;
1237 ulong busNo = cmd->create_device.bus_no;
1238 ulong devNo = cmd->create_device.dev_no;
1239 struct visorchipset_device_info *pDevInfo = NULL;
1240 struct visorchipset_bus_info *pBusInfo = NULL;
1241 int rc = CONTROLVM_RESP_SUCCESS;
/* duplicate create for an already-created device is an error */
1243 pDevInfo = finddevice(&DevInfoList, busNo, devNo);
1244 if (pDevInfo && (pDevInfo->state.created == 1)) {
1245 LOGERR("CONTROLVM_DEVICE_CREATE Failed: busNo=%lu, devNo=%lu already exists",
1247 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1248 POSTCODE_SEVERITY_ERR);
1249 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
/* the owning bus must exist... */
1252 pBusInfo = findbus(&BusInfoList, busNo);
1254 LOGERR("CONTROLVM_DEVICE_CREATE Failed: Invalid bus %lu - out of range",
1256 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1257 POSTCODE_SEVERITY_ERR);
1258 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
/* ...and must have completed its own CREATE */
1261 if (pBusInfo->state.created == 0) {
1262 LOGERR("CONTROLVM_DEVICE_CREATE Failed: Invalid bus %lu - not created yet",
1264 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1265 POSTCODE_SEVERITY_ERR);
1266 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1269 pDevInfo = kzalloc(sizeof(struct visorchipset_device_info), GFP_KERNEL);
1270 if (pDevInfo == NULL) {
1271 LOGERR("CONTROLVM_DEVICE_CREATE Failed: busNo=%lu, devNo=%lu kmaloc failed",
1273 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1274 POSTCODE_SEVERITY_ERR);
1275 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
/* fill in the tracking entry from the create_device packet */
1279 INIT_LIST_HEAD(&pDevInfo->entry);
1280 pDevInfo->bus_no = busNo;
1281 pDevInfo->dev_no = devNo;
1282 pDevInfo->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1283 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
1284 POSTCODE_SEVERITY_INFO);
/* test messages carry local (OS-memory) channel addresses */
1286 if (inmsg->hdr.flags.test_message == 1)
1287 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1289 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1290 pDevInfo->chan_info.channel_addr = cmd->create_device.channel_addr;
1291 pDevInfo->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1292 pDevInfo->chan_info.channel_type_uuid =
1293 cmd->create_device.data_type_uuid;
1294 pDevInfo->chan_info.intr = cmd->create_device.intr;
1295 list_add(&pDevInfo->entry, &DevInfoList);
1296 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, devNo, busNo,
1297 POSTCODE_SEVERITY_INFO);
1299 /* get the bus and devNo for DiagPool channel */
1301 is_diagpool_channel(pDevInfo->chan_info.channel_type_uuid)) {
1302 g_diagpoolBusNo = busNo;
1303 g_diagpoolDevNo = devNo;
1304 LOGINF("CONTROLVM_DEVICE_CREATE for DiagPool channel: busNo=%lu, devNo=%lu",
1305 g_diagpoolBusNo, g_diagpoolDevNo);
/* respond; new devices start in the running segment state */
1307 device_epilog(busNo, devNo, segment_state_running,
1308 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
1309 inmsg->hdr.flags.response_expected == 1,
1310 FOR_VISORBUS(pDevInfo->chan_info.channel_type_uuid));
/* Handle a CONTROLVM_DEVICE_CHANGESTATE message for a virtual device:
 * verify the device exists and was created, then pass the requested
 * segment state along via device_epilog().
 */
1314 my_device_changestate(struct controlvm_message *inmsg)
1316 struct controlvm_message_packet *cmd = &inmsg->cmd;
1317 ulong busNo = cmd->device_change_state.bus_no;
1318 ulong devNo = cmd->device_change_state.dev_no;
1319 struct spar_segment_state state = cmd->device_change_state.state;
1320 struct visorchipset_device_info *pDevInfo = NULL;
1321 int rc = CONTROLVM_RESP_SUCCESS;
1323 pDevInfo = finddevice(&DevInfoList, busNo, devNo);
/* unknown (bus,dev) pair */
1325 LOGERR("CONTROLVM_DEVICE_CHANGESTATE Failed: busNo=%lu, devNo=%lu invalid (doesn't exist)",
1327 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1328 POSTCODE_SEVERITY_ERR);
1329 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
/* device known but never finished CREATE */
1332 if (pDevInfo->state.created == 0) {
1333 LOGERR("CONTROLVM_DEVICE_CHANGESTATE Failed: busNo=%lu, devNo=%lu invalid (not created)",
1335 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1336 POSTCODE_SEVERITY_ERR);
1337 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
/* only proceed to the epilog when validation succeeded */
1340 if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1341 device_epilog(busNo, devNo, state, CONTROLVM_DEVICE_CHANGESTATE,
1343 inmsg->hdr.flags.response_expected == 1,
1345 pDevInfo->chan_info.channel_type_uuid));
/* Handle a CONTROLVM_DEVICE_DESTROY message: verify the device exists
 * and was created, then hand off to device_epilog() for the response.
 */
1349 my_device_destroy(struct controlvm_message *inmsg)
1351 struct controlvm_message_packet *cmd = &inmsg->cmd;
1352 ulong busNo = cmd->destroy_device.bus_no;
1353 ulong devNo = cmd->destroy_device.dev_no;
1354 struct visorchipset_device_info *pDevInfo = NULL;
1355 int rc = CONTROLVM_RESP_SUCCESS;
1357 pDevInfo = finddevice(&DevInfoList, busNo, devNo);
/* unknown (bus,dev) pair */
1359 LOGERR("CONTROLVM_DEVICE_DESTROY Failed: busNo=%lu, devNo=%lu invalid",
1361 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
/* destroying a device that was never created (or already destroyed) */
1364 if (pDevInfo->state.created == 0) {
1365 LOGERR("CONTROLVM_DEVICE_DESTROY Failed: busNo=%lu, devNo=%lu already destroyed",
1367 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
/* only proceed to the epilog when validation succeeded */
1371 if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1372 device_epilog(busNo, devNo, segment_state_running,
1373 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
1374 inmsg->hdr.flags.response_expected == 1,
1376 pDevInfo->chan_info.channel_type_uuid));
1379 /* When provided with the physical address of the controlvm channel
1380 * (phys_addr), the offset to the payload area we need to manage
1381 * (offset), and the size of this payload area (bytes), fills in the
1382 * CONTROLVM_PAYLOAD_INFO struct. Returns TRUE for success or FALSE
/* Map the controlvm payload region at phys_addr+offset (bytes long)
 * with ioremap_cache() and record offset/size/pointer in *info.
 * Rejects a NULL info, and a zero offset or size, before mapping.
 */
1386 initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
1387 CONTROLVM_PAYLOAD_INFO *info)
1389 u8 __iomem *payload = NULL;
1390 int rc = CONTROLVM_RESP_SUCCESS;
/* defensive: info must be supplied by the caller */
1393 LOGERR("HUH ? CONTROLVM_PAYLOAD_INIT Failed : Programmer check at %s:%d",
1394 __FILE__, __LINE__);
1395 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1398 memset(info, 0, sizeof(CONTROLVM_PAYLOAD_INFO));
/* a zero offset or zero length cannot describe a valid payload area */
1399 if ((offset == 0) || (bytes == 0)) {
1400 LOGERR("CONTROLVM_PAYLOAD_INIT Failed: request_payload_offset=%llu request_payload_bytes=%llu!",
1401 (u64) offset, (u64) bytes);
1402 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
/* map the physical payload area into kernel virtual space */
1405 payload = ioremap_cache(phys_addr + offset, bytes);
1406 if (payload == NULL) {
1407 LOGERR("CONTROLVM_PAYLOAD_INIT Failed: ioremap_cache %llu for %llu bytes failed",
1408 (u64) offset, (u64) bytes);
1409 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1413 info->offset = offset;
1414 info->bytes = bytes;
1415 info->ptr = payload;
1416 LOGINF("offset=%llu, bytes=%lu, ptr=%p",
1417 (u64) (info->offset), (ulong) (info->bytes), info->ptr);
/* error path: unmap if we mapped but failed later */
1421 if (payload != NULL) {
/* Undo initialize_controlvm_payload_info(): release the mapping (if
 * any) and zero the descriptor so it is safely reusable.
 */
1430 destroy_controlvm_payload_info(CONTROLVM_PAYLOAD_INFO *info)
1432 if (info->ptr != NULL) {
1436 memset(info, 0, sizeof(CONTROLVM_PAYLOAD_INFO));
/* Read the request-payload offset and size out of the controlvm
 * channel header, then map that region into ControlVm_payload_info
 * via initialize_controlvm_payload_info().
 */
1440 initialize_controlvm_payload(void)
1442 HOSTADDRESS phys_addr = visorchannel_get_physaddr(ControlVm_channel);
1443 u64 payloadOffset = 0;
1444 u32 payloadBytes = 0;
/* offset of the payload area, relative to the channel start */
1446 if (visorchannel_read(ControlVm_channel,
1447 offsetof(struct spar_controlvm_channel_protocol,
1448 request_payload_offset),
1449 &payloadOffset, sizeof(payloadOffset)) < 0) {
1450 LOGERR("CONTROLVM_PAYLOAD_INIT Failed to read controlvm channel!");
1451 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1452 POSTCODE_SEVERITY_ERR);
/* size of the payload area in bytes */
1455 if (visorchannel_read(ControlVm_channel,
1456 offsetof(struct spar_controlvm_channel_protocol,
1457 request_payload_bytes),
1458 &payloadBytes, sizeof(payloadBytes)) < 0) {
1459 LOGERR("CONTROLVM_PAYLOAD_INIT Failed to read controlvm channel!");
1460 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1461 POSTCODE_SEVERITY_ERR);
1464 initialize_controlvm_payload_info(phys_addr,
1465 payloadOffset, payloadBytes,
1466 &ControlVm_payload_info);
1469 /* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1470 * Returns CONTROLVM_RESP_xxx code.
/* Send ACTION=online uevent for the visorchipset platform device and
 * report success; exported for use by other visor modules.
 */
1473 visorchipset_chipset_ready(void)
1475 kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
1476 return CONTROLVM_RESP_SUCCESS;
1478 EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
/* Kick off a chipset selftest by emitting a KOBJ_CHANGE uevent with
 * SPARSP_SELFTEST=1 in the environment; always reports success.
 */
1481 visorchipset_chipset_selftest(void)
1483 char env_selftest[20];
1484 char *envp[] = { env_selftest, NULL };
1486 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1487 kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1489 return CONTROLVM_RESP_SUCCESS;
1491 EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1493 /* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1494 * Returns CONTROLVM_RESP_xxx code.
/* Send ACTION=offline uevent for the visorchipset platform device and
 * report success; exported for use by other visor modules.
 */
1497 visorchipset_chipset_notready(void)
1499 kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1500 return CONTROLVM_RESP_SUCCESS;
1502 EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
/* Process CONTROLVM_CHIPSET_READY: fire the online uevent, then either
 * respond immediately, or — when visorchipset_holdchipsetready is set —
 * stash the header in g_ChipSetMsgHdr so the response can be sent later
 * (after modules have loaded and disks are mounted; see
 * controlvm_periodic_work / chipsetready_store).
 */
1505 chipset_ready(struct controlvm_message_header *msgHdr)
1507 int rc = visorchipset_chipset_ready();
1509 if (rc != CONTROLVM_RESP_SUCCESS)
1511 if (msgHdr->flags.response_expected && !visorchipset_holdchipsetready)
1512 controlvm_respond(msgHdr, rc);
1513 if (msgHdr->flags.response_expected && visorchipset_holdchipsetready) {
1514 /* Send CHIPSET_READY response when all modules have been loaded
1515 * and disks mounted for the partition
1517 g_ChipSetMsgHdr = *msgHdr;
1518 LOGINF("Holding CHIPSET_READY response");
/* Process CONTROLVM_CHIPSET_SELFTEST: trigger the selftest uevent and
 * respond if the sender asked for a response.
 */
1523 chipset_selftest(struct controlvm_message_header *msgHdr)
1525 int rc = visorchipset_chipset_selftest();
1527 if (rc != CONTROLVM_RESP_SUCCESS)
1529 if (msgHdr->flags.response_expected)
1530 controlvm_respond(msgHdr, rc);
/* Process CONTROLVM_CHIPSET_STOP: fire the offline uevent and respond
 * if the sender asked for a response.
 */
1534 chipset_notready(struct controlvm_message_header *msgHdr)
1536 int rc = visorchipset_chipset_notready();
1538 if (rc != CONTROLVM_RESP_SUCCESS)
1540 if (msgHdr->flags.response_expected)
1541 controlvm_respond(msgHdr, rc);
1544 /* This is your "one-stop" shop for grabbing the next message from the
1545 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
/* Pop the next message from the CONTROLVM_QUEUE_EVENT queue into *msg.
 * Event-queue messages must never be test messages; such messages are
 * rejected as nonsensical.
 */
1548 read_controlvm_event(struct controlvm_message *msg)
1550 if (visorchannel_signalremove(ControlVm_channel,
1551 CONTROLVM_QUEUE_EVENT, msg)) {
1553 if (msg->hdr.flags.test_message == 1) {
1554 LOGERR("ignoring bad CONTROLVM_QUEUE_EVENT msg with controlvm_msg_id=0x%x because Flags.testMessage is nonsensical (=1)",
1564 * The general parahotplug flow works as follows. The visorchipset
1565 * driver receives a DEVICE_CHANGESTATE message from Command
1566 * specifying a physical device to enable or disable. The CONTROLVM
1567 * message handler calls parahotplug_process_message, which then adds
1568 * the message to a global list and kicks off a udev event which
1569 * causes a user level script to enable or disable the specified
1570 * device. The udev script then writes to
1571 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1572 * to get called, at which point the appropriate CONTROLVM message is
1573 * retrieved from the list and responded to.
1576 #define PARAHOTPLUG_TIMEOUT_MS 2000
1579 * Generate unique int to match an outstanding CONTROLVM message with a
1580 * udev script /proc response
/* Return a process-unique, monotonically increasing id used to match a
 * pending CONTROLVM message with the udev script's /proc response.
 */
1583 parahotplug_next_id(void)
1585 static atomic_t id = ATOMIC_INIT(0);
1587 return atomic_inc_return(&id);
1591 * Returns the time (in jiffies) when a CONTROLVM message on the list
1592 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
static unsigned long
/* Expiration time (jiffies) for a newly queued parahotplug request:
 * PARAHOTPLUG_TIMEOUT_MS from now.
 */
1595 parahotplug_next_expiration(void)
1597 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1601 * Create a parahotplug_request, which is basically a wrapper for a
1602 * CONTROLVM_MESSAGE that we can stick on a list
static struct parahotplug_request *
/* Allocate a parahotplug_request wrapping *msg, stamped with a unique
 * id and an expiration time. __GFP_NORETRY means allocation may fail
 * (returns NULL) instead of invoking drastic reclaim.
 */
1605 parahotplug_request_create(struct controlvm_message *msg)
1607 struct parahotplug_request *req =
1608 kmalloc(sizeof(struct parahotplug_request),
1609 GFP_KERNEL|__GFP_NORETRY);
1613 req->id = parahotplug_next_id();
1614 req->expiration = parahotplug_next_expiration();
1621 * Free a parahotplug_request.
/* Free a parahotplug_request previously allocated by
 * parahotplug_request_create().
 */
1624 parahotplug_request_destroy(struct parahotplug_request *req)
1630 * Cause uevent to run the user level script to do the disable/enable
1631 * specified in (the CONTROLVM message in) the specified
1632 * parahotplug_request
/* Emit a KOBJ_CHANGE uevent that launches the user-level parahotplug
 * script. The environment carries the request id, desired state, and
 * the PCI bus/device/function decoded from the CONTROLVM message
 * (dev_no packs device in the upper bits and function in the low 3).
 */
1635 parahotplug_request_kickoff(struct parahotplug_request *req)
1637 struct controlvm_message_packet *cmd = &req->msg.cmd;
1638 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1641 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1644 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1645 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1646 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1647 cmd->device_change_state.state.active);
1648 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1649 cmd->device_change_state.bus_no);
/* dev_no >> 3 = PCI device, dev_no & 0x7 = PCI function */
1650 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1651 cmd->device_change_state.dev_no >> 3);
1652 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1653 cmd->device_change_state.dev_no & 0x7);
1655 LOGINF("parahotplug_request_kickoff: state=%d, bdf=%d/%d/%d, id=%u\n",
1656 cmd->device_change_state.state.active,
1657 cmd->device_change_state.bus_no,
1658 cmd->device_change_state.dev_no >> 3,
1659 cmd->device_change_state.dev_no & 7, req->id);
1661 kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1666 * Remove any request from the list that's been on there too long and
1667 * respond with an error.
/* Sweep the pending parahotplug request list and expire entries whose
 * deadline has passed, responding with DEVICE_UDEV_TIMEOUT when the
 * originator expects a response. Runs under
 * Parahotplug_request_list_lock.
 */
1670 parahotplug_process_list(void)
1672 struct list_head *pos = NULL;
1673 struct list_head *tmp = NULL;
1675 spin_lock(&Parahotplug_request_list_lock);
/* _safe variant: entries may be removed while iterating */
1677 list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1678 struct parahotplug_request *req =
1679 list_entry(pos, struct parahotplug_request, list);
1680 if (time_after_eq(jiffies, req->expiration)) {
1682 if (req->msg.hdr.flags.response_expected)
1683 controlvm_respond_physdev_changestate(
1685 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1686 req->msg.cmd.device_change_state.state);
1687 parahotplug_request_destroy(req);
1691 spin_unlock(&Parahotplug_request_list_lock);
1695 * Called from the /proc handler, which means the user script has
1696 * finished the enable/disable. Find the matching identifier, and
1697 * respond to the CONTROLVM message with success.
/* Called when the user script reports completion (via the proc/sysfs
 * interface): find the pending request matching "id", record the final
 * active state, respond with success if expected, and free the request.
 */
1700 parahotplug_request_complete(int id, u16 active)
1702 struct list_head *pos = NULL;
1703 struct list_head *tmp = NULL;
1705 spin_lock(&Parahotplug_request_list_lock);
1707 /* Look for a request matching "id". */
1708 list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1709 struct parahotplug_request *req =
1710 list_entry(pos, struct parahotplug_request, list);
1711 if (req->id == id) {
1712 /* Found a match. Remove it from the list and
/* drop the lock before responding/destroying the request */
1716 spin_unlock(&Parahotplug_request_list_lock);
1717 req->msg.cmd.device_change_state.state.active = active;
1718 if (req->msg.hdr.flags.response_expected)
1719 controlvm_respond_physdev_changestate(
1720 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1721 req->msg.cmd.device_change_state.state);
1722 parahotplug_request_destroy(req);
1727 spin_unlock(&Parahotplug_request_list_lock);
1732 * Enables or disables a PCI device by kicking off a udev script
/* Handle a DEVICE_CHANGESTATE message for a physical device: wrap it in
 * a parahotplug_request and kick off the udev script. Enable requests
 * are answered with success immediately; disable requests are queued
 * until the script confirms (or they time out).
 */
1735 parahotplug_process_message(struct controlvm_message *inmsg)
1737 struct parahotplug_request *req;
1739 req = parahotplug_request_create(inmsg);
1742 LOGERR("parahotplug_process_message: couldn't allocate request");
1746 if (inmsg->cmd.device_change_state.state.active) {
1747 /* For enable messages, just respond with success
1748 * right away. This is a bit of a hack, but there are
1749 * issues with the early enable messages we get (with
1750 * either the udev script not detecting that the device
1751 * is up, or not getting called at all). Fortunately
1752 * the messages that get lost don't matter anyway, as
1753 * devices are automatically enabled at
1756 parahotplug_request_kickoff(req);
1757 controlvm_respond_physdev_changestate(&inmsg->hdr,
1758 CONTROLVM_RESP_SUCCESS, inmsg->cmd.
1759 device_change_state.state);
1760 parahotplug_request_destroy(req);
1762 /* For disable messages, add the request to the
1763 * request list before kicking off the udev script. It
1764 * won't get responded to until the script has
1765 * indicated it's done.
1767 spin_lock(&Parahotplug_request_list_lock);
1768 list_add_tail(&(req->list), &Parahotplug_request_list);
1769 spin_unlock(&Parahotplug_request_list_lock);
1771 parahotplug_request_kickoff(req);
1775 /* Process a controlvm message.
1777 * FALSE - this function will return FALSE only in the case where the
1778 * controlvm message was NOT processed, but processing must be
1779 * retried before reading the next controlvm message; a
1780 * scenario where this can occur is when we need to throttle
1781 * the allocation of memory in which to copy out controlvm
1783 * TRUE - processing of the controlvm message completed,
1784 * either successfully or with an error.
/* Process one controlvm message. Returns FALSE only when processing
 * must be retried (e.g. payload-copy throttling); TRUE when the message
 * completed, successfully or with an error. Sets up a parser context
 * for any payload data, ACKs the message, then dispatches on hdr.id.
 */
1787 handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
1789 struct controlvm_message_packet *cmd = &inmsg.cmd;
1790 u64 parametersAddr = 0;
1791 u32 parametersBytes = 0;
1792 PARSER_CONTEXT *parser_ctx = NULL;
1793 BOOL isLocalAddr = FALSE;
1794 struct controlvm_message ackmsg;
1796 /* create parsing context if necessary */
1797 isLocalAddr = (inmsg.hdr.flags.test_message == 1);
1798 if (channel_addr == 0) {
1799 LOGERR("HUH? channel_addr is 0!");
/* payload lives at channel base + per-message offset */
1802 parametersAddr = channel_addr + inmsg.hdr.payload_vm_offset;
1803 parametersBytes = inmsg.hdr.payload_bytes;
1805 /* Parameter and channel addresses within test messages actually lie
1806 * within our OS-controlled memory. We need to know that, because it
1807 * makes a difference in how we compute the virtual address.
1809 if (parametersAddr != 0 && parametersBytes != 0) {
1813 parser_init_byteStream(parametersAddr, parametersBytes,
1814 isLocalAddr, &retry);
/* throttled: caller should retry this same message later */
1817 LOGWRN("throttling to copy payload");
1820 LOGWRN("parsing failed");
1821 LOGWRN("inmsg.hdr.Id=0x%lx", (ulong) inmsg.hdr.id);
1822 LOGWRN("parametersAddr=0x%llx", (u64) parametersAddr);
1823 LOGWRN("parametersBytes=%lu", (ulong) parametersBytes);
1824 LOGWRN("isLocalAddr=%d", isLocalAddr);
/* ACK receipt of the message before dispatching it */
1829 controlvm_init_response(&ackmsg, &inmsg.hdr,
1830 CONTROLVM_RESP_SUCCESS);
1831 if ((ControlVm_channel)
1833 (!visorchannel_signalinsert
1834 (ControlVm_channel, CONTROLVM_QUEUE_ACK, &ackmsg)))
/* NOTE(review): log text is garbled ("failed ... failed") -- candidate
 * string fix, left untouched here */
1835 LOGWRN("failed to send ACK failed");
/* dispatch on message type */
1837 switch (inmsg.hdr.id) {
1838 case CONTROLVM_CHIPSET_INIT:
1839 LOGINF("CHIPSET_INIT(#busses=%lu,#switches=%lu)",
1840 (ulong) inmsg.cmd.init_chipset.bus_count,
1841 (ulong) inmsg.cmd.init_chipset.switch_count);
1842 chipset_init(&inmsg);
1844 case CONTROLVM_BUS_CREATE:
1845 LOGINF("BUS_CREATE(%lu,#devs=%lu)",
1846 (ulong) cmd->create_bus.bus_no,
1847 (ulong) cmd->create_bus.dev_count);
1850 case CONTROLVM_BUS_DESTROY:
1851 LOGINF("BUS_DESTROY(%lu)", (ulong) cmd->destroy_bus.bus_no);
1852 bus_destroy(&inmsg);
1854 case CONTROLVM_BUS_CONFIGURE:
1855 LOGINF("BUS_CONFIGURE(%lu)", (ulong) cmd->configure_bus.bus_no);
1856 bus_configure(&inmsg, parser_ctx);
1858 case CONTROLVM_DEVICE_CREATE:
1859 LOGINF("DEVICE_CREATE(%lu,%lu)",
1860 (ulong) cmd->create_device.bus_no,
1861 (ulong) cmd->create_device.dev_no);
1862 my_device_create(&inmsg);
1864 case CONTROLVM_DEVICE_CHANGESTATE:
/* physical devices go through the parahotplug/udev path */
1865 if (cmd->device_change_state.flags.phys_device) {
1866 LOGINF("DEVICE_CHANGESTATE for physical device (%lu,%lu, active=%lu)",
1867 (ulong) cmd->device_change_state.bus_no,
1868 (ulong) cmd->device_change_state.dev_no,
1869 (ulong) cmd->device_change_state.state.active);
1870 parahotplug_process_message(&inmsg);
1872 LOGINF("DEVICE_CHANGESTATE for virtual device (%lu,%lu, state.Alive=0x%lx)",
1873 (ulong) cmd->device_change_state.bus_no,
1874 (ulong) cmd->device_change_state.dev_no,
1875 (ulong) cmd->device_change_state.state.alive);
1876 /* save the hdr and cmd structures for later use */
1877 /* when sending back the response to Command */
1878 my_device_changestate(&inmsg);
1879 g_DiagMsgHdr = inmsg.hdr;
1880 g_DeviceChangeStatePacket = inmsg.cmd;
1884 case CONTROLVM_DEVICE_DESTROY:
1885 LOGINF("DEVICE_DESTROY(%lu,%lu)",
1886 (ulong) cmd->destroy_device.bus_no,
1887 (ulong) cmd->destroy_device.dev_no);
1888 my_device_destroy(&inmsg);
1890 case CONTROLVM_DEVICE_CONFIGURE:
1891 LOGINF("DEVICE_CONFIGURE(%lu,%lu)",
1892 (ulong) cmd->configure_device.bus_no,
1893 (ulong) cmd->configure_device.dev_no);
1894 /* no op for now, just send a respond that we passed */
1895 if (inmsg.hdr.flags.response_expected)
1896 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1898 case CONTROLVM_CHIPSET_READY:
1899 LOGINF("CHIPSET_READY");
1900 chipset_ready(&inmsg.hdr);
1902 case CONTROLVM_CHIPSET_SELFTEST:
1903 LOGINF("CHIPSET_SELFTEST");
1904 chipset_selftest(&inmsg.hdr);
1906 case CONTROLVM_CHIPSET_STOP:
1907 LOGINF("CHIPSET_STOP");
1908 chipset_notready(&inmsg.hdr);
/* unknown ids are rejected, with a response if one was requested */
1911 LOGERR("unrecognized controlvm cmd=%d", (int) inmsg.hdr.id);
1912 if (inmsg.hdr.flags.response_expected)
1913 controlvm_respond(&inmsg.hdr,
1914 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
/* release the payload parser context, if one was created */
1918 if (parser_ctx != NULL) {
1919 parser_done(parser_ctx);
/* Ask the hypervisor (via vmcall) for the physical address of the
 * controlvm channel; logs and reports failure if the vmcall fails.
 */
static HOSTADDRESS controlvm_get_channel_address(void)
1930 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size))) {
1931 ERRDRV("%s - vmcall to determine controlvm channel addr failed",
1935 INFODRV("controlvm addr=%Lx", addr);
/* Periodic worker that drains the controlvm channel: flushes stale
 * responses, sends the deferred CHIPSET_READY response once all chipset
 * events have occurred, processes incoming event messages (with retry
 * on throttling), expires parahotplug requests, and adjusts its own
 * polling rate (fast vs. slow) before re-queueing itself.
 */
1940 controlvm_periodic_work(struct work_struct *work)
1942 struct controlvm_message inmsg;
1943 BOOL gotACommand = FALSE;
1944 BOOL handle_command_failed = FALSE;
1945 static u64 Poll_Count;
1947 /* make sure visorbus server is registered for controlvm callbacks */
1948 if (visorchipset_serverregwait && !serverregistered)
1950 /* make sure visorclientbus server is registered for controlvm
1953 if (visorchipset_clientregwait && !clientregistered)
1957 if (Poll_Count >= 250)
1962 /* Check events to determine if response to CHIPSET_READY
1965 if (visorchipset_holdchipsetready
1966 && (g_ChipSetMsgHdr.id != CONTROLVM_INVALID)) {
/* all required chipset events have arrived; release the held
 * CHIPSET_READY response now */
1967 if (check_chipset_events() == 1) {
1968 LOGINF("Sending CHIPSET_READY response");
1969 controlvm_respond(&g_ChipSetMsgHdr, 0);
1970 clear_chipset_events();
1971 memset(&g_ChipSetMsgHdr, 0,
1972 sizeof(struct controlvm_message_header));
/* drain (and discard) anything sitting on the RESPONSE queue */
1976 while (visorchannel_signalremove(ControlVm_channel,
1977 CONTROLVM_QUEUE_RESPONSE,
1979 if (inmsg.hdr.payload_max_bytes != 0) {
1980 LOGERR("Payload of size %lu returned @%lu with unexpected message id %d.",
1981 (ulong) inmsg.hdr.payload_max_bytes,
1982 (ulong) inmsg.hdr.payload_vm_offset,
1987 if (ControlVm_Pending_Msg_Valid) {
1988 /* we throttled processing of a prior
1989 * msg, so try to process it again
1990 * rather than reading a new one
1992 inmsg = ControlVm_Pending_Msg;
1993 ControlVm_Pending_Msg_Valid = FALSE;
1996 gotACommand = read_controlvm_event(&inmsg);
1999 handle_command_failed = FALSE;
2000 while (gotACommand && (!handle_command_failed)) {
2001 Most_recent_message_jiffies = jiffies;
2002 if (handle_command(inmsg,
2003 visorchannel_get_physaddr
2004 (ControlVm_channel)))
2005 gotACommand = read_controlvm_event(&inmsg);
2007 /* this is a scenario where throttling
2008 * is required, but probably NOT an
2009 * error...; we stash the current
2010 * controlvm msg so we will attempt to
2011 * reprocess it on our next loop
2013 handle_command_failed = TRUE;
2014 ControlVm_Pending_Msg = inmsg;
2015 ControlVm_Pending_Msg_Valid = TRUE;
2019 /* parahotplug_worker */
2020 parahotplug_process_list();
/* switch to slow polling after MIN_IDLE_SECONDS of inactivity,
 * back to fast polling as soon as messages arrive */
2024 if (time_after(jiffies,
2025 Most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
2026 /* it's been longer than MIN_IDLE_SECONDS since we
2027 * processed our last controlvm message; slow down the
2030 if (Poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW) {
2031 LOGINF("switched to slow controlvm polling");
2032 Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2035 if (Poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST) {
2036 Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2037 LOGINF("switched to fast controlvm polling");
/* re-arm ourselves at the chosen polling interval */
2041 queue_delayed_work(Periodic_controlvm_workqueue,
2042 &Periodic_controlvm_work, Poll_jiffies);
/* One-shot worker used on the crash/kdump path: sends CHIPSET_INIT,
 * then reads the crash-time bus- and device-create messages that were
 * saved in the controlvm channel and replays them (bus_create /
 * my_device_create) so the storage device needed for dumping exists.
 * Finally queues the normal periodic controlvm worker in slow mode.
 */
2046 setup_crash_devices_work_queue(struct work_struct *work)
2049 struct controlvm_message localCrashCreateBusMsg;
2050 struct controlvm_message localCrashCreateDevMsg;
2051 struct controlvm_message msg;
2052 u32 localSavedCrashMsgOffset;
2053 u16 localSavedCrashMsgCount;
2055 /* make sure visorbus server is registered for controlvm callbacks */
2056 if (visorchipset_serverregwait && !serverregistered)
2059 /* make sure visorclientbus server is registered for controlvm
2062 if (visorchipset_clientregwait && !clientregistered)
2065 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
2067 /* send init chipset msg */
2068 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2069 msg.cmd.init_chipset.bus_count = 23;
2070 msg.cmd.init_chipset.switch_count = 0;
2074 /* get saved message count */
2075 if (visorchannel_read(ControlVm_channel,
2076 offsetof(struct spar_controlvm_channel_protocol,
2077 saved_crash_message_count),
2078 &localSavedCrashMsgCount, sizeof(u16)) < 0) {
2079 LOGERR("failed to get Saved Message Count");
2080 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2081 POSTCODE_SEVERITY_ERR);
/* sanity check: channel must hold the expected number of saved msgs */
2085 if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
2086 LOGERR("Saved Message Count incorrect %d",
2087 localSavedCrashMsgCount);
2088 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
2089 localSavedCrashMsgCount,
2090 POSTCODE_SEVERITY_ERR);
2094 /* get saved crash message offset */
2095 if (visorchannel_read(ControlVm_channel,
2096 offsetof(struct spar_controlvm_channel_protocol,
2097 saved_crash_message_offset),
2098 &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
2099 LOGERR("failed to get Saved Message Offset");
2100 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2101 POSTCODE_SEVERITY_ERR);
2105 /* read create device message for storage bus offset */
2106 if (visorchannel_read(ControlVm_channel,
2107 localSavedCrashMsgOffset,
2108 &localCrashCreateBusMsg,
2109 sizeof(struct controlvm_message)) < 0) {
2110 LOGERR("CRASH_DEV_RD_BUS_FAIULRE: Failed to read CrashCreateBusMsg!");
2111 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
2112 POSTCODE_SEVERITY_ERR);
2116 /* read create device message for storage device */
2117 if (visorchannel_read(ControlVm_channel,
2118 localSavedCrashMsgOffset +
2119 sizeof(struct controlvm_message),
2120 &localCrashCreateDevMsg,
2121 sizeof(struct controlvm_message)) < 0) {
2122 LOGERR("CRASH_DEV_RD_DEV_FAIULRE: Failed to read CrashCreateDevMsg!");
2123 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
2124 POSTCODE_SEVERITY_ERR);
2128 /* reuse IOVM create bus message */
2129 if (localCrashCreateBusMsg.cmd.create_bus.channel_addr != 0)
2130 bus_create(&localCrashCreateBusMsg)
2132 LOGERR("CrashCreateBusMsg is null, no dump will be taken");
2133 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
2134 POSTCODE_SEVERITY_ERR);
2138 /* reuse create device message for storage device */
2139 if (localCrashCreateDevMsg.cmd.create_device.channel_addr != 0)
2140 my_device_create(&localCrashCreateDevMsg);
2142 LOGERR("CrashCreateDevMsg is null, no dump will be taken");
2143 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
2144 POSTCODE_SEVERITY_ERR);
2147 LOGINF("Bus and device ready for dumping");
2148 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
/* hand over to the regular poller, starting in slow mode */
2153 Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2155 queue_delayed_work(Periodic_controlvm_workqueue,
2156 &Periodic_controlvm_work, Poll_jiffies);
/* Callback: forward a visorbus BUS_CREATE completion to bus_responder(). */
2160 bus_create_response(ulong busNo, int response)
2162 bus_responder(CONTROLVM_BUS_CREATE, busNo, response);
/* Callback: forward a visorbus BUS_DESTROY completion to bus_responder(). */
2166 bus_destroy_response(ulong busNo, int response)
2168 bus_responder(CONTROLVM_BUS_DESTROY, busNo, response);
/* Callback: forward a DEVICE_CREATE completion to device_responder(). */
2172 device_create_response(ulong busNo, ulong devNo, int response)
2174 device_responder(CONTROLVM_DEVICE_CREATE, busNo, devNo, response);
/* Callback: forward a DEVICE_DESTROY completion to device_responder(). */
2178 device_destroy_response(ulong busNo, ulong devNo, int response)
2180 device_responder(CONTROLVM_DEVICE_DESTROY, busNo, devNo, response);
/* Exported callback: report completion of a device pause as a
 * CHANGESTATE response with the standby segment state.
 */
2184 visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
2187 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2188 bus_no, dev_no, response,
2189 segment_state_standby);
2191 EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
/* Callback: report completion of a device resume as a CHANGESTATE
 * response with the running segment state.
 */
2194 device_resume_response(ulong busNo, ulong devNo, int response)
2196 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2197 busNo, devNo, response,
2198 segment_state_running);
/* Exported: copy the tracked info for bus_no into *bus_info; logs and
 * fails when the bus is unknown.
 */
2202 visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
2204 void *p = findbus(&BusInfoList, bus_no);
2207 LOGERR("(%lu) failed", bus_no);
2210 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2213 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
/* Exported: attach an opaque bus-driver context pointer to the tracked
 * bus entry; logs and fails when the bus is unknown.
 */
2216 visorchipset_set_bus_context(ulong bus_no, void *context)
2218 struct visorchipset_bus_info *p = findbus(&BusInfoList, bus_no);
2221 LOGERR("(%lu) failed", bus_no);
2224 p->bus_driver_context = context;
2227 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
/* Exported: copy the tracked info for (bus_no, dev_no) into *dev_info;
 * logs and fails when the device is unknown.
 */
2230 visorchipset_get_device_info(ulong bus_no, ulong dev_no,
2231 struct visorchipset_device_info *dev_info)
2233 void *p = finddevice(&DevInfoList, bus_no, dev_no);
2236 LOGERR("(%lu,%lu) failed", bus_no, dev_no);
2239 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2242 EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
/* Exported: attach an opaque bus-driver context pointer to the tracked
 * device entry; logs and fails when the device is unknown.
 */
2245 visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
2247 struct visorchipset_device_info *p =
2248 finddevice(&DevInfoList, bus_no, dev_no);
2251 LOGERR("(%lu,%lu) failed", bus_no, dev_no);
2254 p->bus_driver_context = context;
2257 EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2259 /* Generic wrapper function for allocating memory from a kmem_cache pool.
/* Allocate one object from a kmem_cache pool, always with __GFP_NORETRY
 * so allocation can fail rather than trigger the OOM killer. fn/ln
 * identify the calling site for the failure log. Tracks outstanding
 * buffers in Visorchipset_cache_buffers_in_use.
 */
2262 visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
2272 /* __GFP_NORETRY means "ok to fail", meaning
2273 * kmem_cache_alloc() can return NULL, implying the caller CAN
2274 * cope with failure. If you do NOT specify __GFP_NORETRY,
2275 * Linux will go to extreme measures to get memory for you
2276 * (like, invoke oom killer), which will probably cripple the
2279 gfp |= __GFP_NORETRY;
2280 p = kmem_cache_alloc(pool, gfp);
2282 LOGERR("kmem_cache_alloc failed early @%s:%d\n", fn, ln);
2285 atomic_inc(&Visorchipset_cache_buffers_in_use);
2289 /* Generic wrapper function for freeing memory from a kmem_cache pool.
2292 visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2295 LOGERR("NULL pointer @%s:%d\n", fn, ln);
2298 atomic_dec(&Visorchipset_cache_buffers_in_use);
2299 kmem_cache_free(pool, p);
2302 static ssize_t chipsetready_store(struct device *dev,
2303 struct device_attribute *attr, const char *buf, size_t count)
2307 if (sscanf(buf, "%63s", msgtype) != 1)
2310 if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
2311 chipset_events[0] = 1;
2313 } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
2314 chipset_events[1] = 1;
2320 /* The parahotplug/devicedisabled interface gets called by our support script
2321 * when an SR-IOV device has been shut down. The ID is passed to the script
2322 * and then passed back when the device has been removed.
2324 static ssize_t devicedisabled_store(struct device *dev,
2325 struct device_attribute *attr, const char *buf, size_t count)
2329 if (kstrtouint(buf, 10, &id) != 0)
2332 parahotplug_request_complete(id, 0);
2336 /* The parahotplug/deviceenabled interface gets called by our support script
2337 * when an SR-IOV device has been recovered. The ID is passed to the script
2338 * and then passed back when the device has been brought back up.
2340 static ssize_t deviceenabled_store(struct device *dev,
2341 struct device_attribute *attr, const char *buf, size_t count)
2345 if (kstrtouint(buf, 10, &id) != 0)
2348 parahotplug_request_complete(id, 1);
/* visorchipset_init() - module initialization entry point.
 * NOTE(review): this paste is fragmentary — the leading numbers are line
 * numbers from the original file and many intermediate lines (braces,
 * returns, error labels) are missing, so only the visible flow is
 * documented here.  Reconstruct against the original source before
 * modifying logic.
 */
2353 visorchipset_init(void)
/* Refuse to load when not running on s-Par firmware. */
2359 if (!unisys_spar_platform)
2362 LOGINF("chipset driver version %s loaded", VERSION);
2363 /* process module options */
2364 POSTCODE_LINUX_2(DRIVER_ENTRY_PC, POSTCODE_SEVERITY_INFO);
/* Echo every module parameter so the boot log records the configuration. */
2366 LOGINF("option - testvnic=%d", visorchipset_testvnic);
2367 LOGINF("option - testvnicclient=%d", visorchipset_testvnicclient);
2368 LOGINF("option - testmsg=%d", visorchipset_testmsg);
2369 LOGINF("option - testteardown=%d", visorchipset_testteardown);
2370 LOGINF("option - major=%d", visorchipset_major);
2371 LOGINF("option - serverregwait=%d", visorchipset_serverregwait);
2372 LOGINF("option - clientregwait=%d", visorchipset_clientregwait);
2373 LOGINF("option - holdchipsetready=%d", visorchipset_holdchipsetready);
/* Zero all file-scope state before any of it can be observed. */
2375 memset(&BusDev_Server_Notifiers, 0, sizeof(BusDev_Server_Notifiers));
2376 memset(&BusDev_Client_Notifiers, 0, sizeof(BusDev_Client_Notifiers));
2377 memset(&ControlVm_payload_info, 0, sizeof(ControlVm_payload_info));
2378 memset(&LiveDump_info, 0, sizeof(LiveDump_info));
2379 atomic_set(&LiveDump_info.buffers_in_use, 0);
/* The legacy testvnic option is rejected outright (error + POSTCODE). */
2381 if (visorchipset_testvnic) {
2382 ERRDRV("testvnic option no longer supported: (status = %d)\n",
2384 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
/* Locate the controlvm channel, map it, and validate its header. */
2389 addr = controlvm_get_channel_address();
2392 visorchannel_create_with_lock
2394 sizeof(struct spar_controlvm_channel_protocol),
2395 spar_controlvm_channel_protocol_uuid);
2396 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2397 visorchannel_get_header(ControlVm_channel))) {
2398 LOGINF("Channel %s (ControlVm) discovered",
2399 visorchannel_id(ControlVm_channel, s))
2400 initialize_controlvm_payload();
/* Invalid header: tear the channel back down. */
2402 LOGERR("controlvm channel is invalid");
2403 visorchannel_destroy(ControlVm_channel);
2404 ControlVm_channel = NULL;
2408 LOGERR("no controlvm channel discovered");
/* Create the character device (major chosen via module parameter). */
2412 MajorDev = MKDEV(visorchipset_major, 0);
2413 rc = visorchipset_file_init(MajorDev, &ControlVm_channel);
2415 ERRDRV("visorchipset_file_init(MajorDev, &ControlVm_channel): error (status=%d)\n", rc);
2416 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
/* Clear the saved controlvm message headers used for deferred responses. */
2420 memset(&g_DiagMsgHdr, 0, sizeof(struct controlvm_message_header));
2422 memset(&g_ChipSetMsgHdr, 0, sizeof(struct controlvm_message_header));
2424 memset(&g_DelDumpMsgHdr, 0, sizeof(struct controlvm_message_header));
/* Slab cache backing putfile transfer buffers. */
2426 Putfile_buffer_list_pool =
2427 kmem_cache_create(Putfile_buffer_list_pool_name,
2428 sizeof(struct putfile_buffer_entry),
2429 0, SLAB_HWCACHE_ALIGN, NULL);
2430 if (!Putfile_buffer_list_pool) {
2431 ERRDRV("failed to alloc Putfile_buffer_list_pool: (status=-1)\n");
2432 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
/* Periodic work: crash-kernel boots use the crash-device setup handler,
 * normal boots poll the controlvm channel; skipped entirely when the
 * disable_controlvm parameter is set. */
2436 if (visorchipset_disable_controlvm) {
2437 LOGINF("visorchipset_init:controlvm disabled");
2439 /* if booting in a crash kernel */
2440 if (visorchipset_crash_kernel)
2441 INIT_DELAYED_WORK(&Periodic_controlvm_work,
2442 setup_crash_devices_work_queue);
2444 INIT_DELAYED_WORK(&Periodic_controlvm_work,
2445 controlvm_periodic_work);
2446 Periodic_controlvm_workqueue =
2447 create_singlethread_workqueue("visorchipset_controlvm");
2449 if (Periodic_controlvm_workqueue == NULL) {
2450 ERRDRV("cannot create controlvm workqueue: (status=%d)\n",
2452 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
/* Start in fast-poll mode; controlvm_periodic_work throttles later. */
2457 Most_recent_message_jiffies = jiffies;
2458 Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2459 rc = queue_delayed_work(Periodic_controlvm_workqueue,
2460 &Periodic_controlvm_work, Poll_jiffies);
2462 ERRDRV("queue_delayed_work(Periodic_controlvm_workqueue, &Periodic_controlvm_work, Poll_jiffies): error (status=%d)\n", rc);
2463 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
/* Register the platform device that carries the sysfs attributes. */
2470 Visorchipset_platform_device.dev.devt = MajorDev;
2471 if (platform_device_register(&Visorchipset_platform_device) < 0) {
2472 ERRDRV("platform_device_register(visorchipset) failed: (status=-1)\n");
2473 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2477 LOGINF("visorchipset device created");
2478 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
/* Common failure exit: log and emit the failure POSTCODE with rc. */
2482 LOGERR("visorchipset_init failed");
2483 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2484 POSTCODE_SEVERITY_ERR);
/* visorchipset_exit() - module teardown; undoes visorchipset_init() in
 * reverse order.
 * NOTE(review): fragmentary paste — leading numbers are original line
 * numbers and intermediate lines are missing; only the visible flow is
 * documented.
 */
2490 visorchipset_exit(void)
2494 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
/* When controlvm polling was disabled at init, there is no workqueue to
 * stop; otherwise drain and destroy the periodic work machinery. */
2496 if (visorchipset_disable_controlvm) {
2499 cancel_delayed_work(&Periodic_controlvm_work);
2500 flush_workqueue(Periodic_controlvm_workqueue);
2501 destroy_workqueue(Periodic_controlvm_workqueue);
2502 Periodic_controlvm_workqueue = NULL;
2503 destroy_controlvm_payload_info(&ControlVm_payload_info);
2505 Test_Vnic_channel = NULL;
/* NOTE(review): NULL guard predates kmem_cache_destroy(NULL) being a
 * documented no-op; harmless either way. */
2506 if (Putfile_buffer_list_pool) {
2507 kmem_cache_destroy(Putfile_buffer_list_pool);
2508 Putfile_buffer_list_pool = NULL;
2511 cleanup_controlvm_structures();
/* Scrub the saved controlvm message headers. */
2513 memset(&g_DiagMsgHdr, 0, sizeof(struct controlvm_message_header));
2515 memset(&g_ChipSetMsgHdr, 0, sizeof(struct controlvm_message_header));
2517 memset(&g_DelDumpMsgHdr, 0, sizeof(struct controlvm_message_header));
/* Disconnect and release the controlvm channel, then the char device. */
2519 LOGINF("Channel %s (ControlVm) disconnected",
2520 visorchannel_id(ControlVm_channel, s));
2521 visorchannel_destroy(ControlVm_channel);
2523 visorchipset_file_cleanup();
2524 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2525 LOGINF("chipset driver unloaded");
2528 module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2529 MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2530 int visorchipset_testvnic = 0;
2532 module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2533 MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2534 int visorchipset_testvnicclient = 0;
2536 module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2537 MODULE_PARM_DESC(visorchipset_testmsg,
2538 "1 to manufacture the chipset, bus, and switch messages");
2539 int visorchipset_testmsg = 0;
2541 module_param_named(major, visorchipset_major, int, S_IRUGO);
2542 MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
2543 int visorchipset_major = 0;
2545 module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
2546 MODULE_PARM_DESC(visorchipset_serverreqwait,
2547 "1 to have the module wait for the visor bus to register");
2548 int visorchipset_serverregwait = 0; /* default is off */
2549 module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2550 MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2551 int visorchipset_clientregwait = 1; /* default is on */
2552 module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2553 MODULE_PARM_DESC(visorchipset_testteardown,
2554 "1 to test teardown of the chipset, bus, and switch");
2555 int visorchipset_testteardown = 0; /* default is off */
2556 module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2558 MODULE_PARM_DESC(visorchipset_disable_controlvm,
2559 "1 to disable polling of controlVm channel");
2560 int visorchipset_disable_controlvm = 0; /* default is off */
2561 module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
2562 MODULE_PARM_DESC(visorchipset_crash_kernel,
2563 "1 means we are running in crash kernel");
2564 int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */
2565 module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2567 MODULE_PARM_DESC(visorchipset_holdchipsetready,
2568 "1 to hold response to CHIPSET_READY");
2569 int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
2570 * response immediately */
2571 module_init(visorchipset_init);
2572 module_exit(visorchipset_exit);
2574 MODULE_AUTHOR("Unisys");
2575 MODULE_LICENSE("GPL");
2576 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2578 MODULE_VERSION(VERSION);