staging: unisys: get rid of Test_Vnic_channel
drivers/staging/unisys/visorchipset/visorchipset_main.c  [firefly-linux-kernel-4.4.55.git]
1 /* visorchipset_main.c
2  *
3  * Copyright (C) 2010 - 2013 UNISYS CORPORATION
4  * All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or (at
9  * your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful, but
12  * WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14  * NON INFRINGEMENT.  See the GNU General Public License for more
15  * details.
16  */
17
18 #include "globals.h"
19 #include "visorchipset.h"
20 #include "procobjecttree.h"
21 #include "visorchannel.h"
22 #include "periodic_work.h"
23 #include "file.h"
24 #include "parser.h"
25 #include "uisutils.h"
26 #include "controlvmcompletionstatus.h"
27 #include "guestlinuxdebug.h"
28
29 #include <linux/nls.h>
30 #include <linux/netdevice.h>
31 #include <linux/platform_device.h>
32 #include <linux/uuid.h>
33
34 #define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
35 #define TEST_VNIC_PHYSITF "eth0"        /* physical network interface for
36                                          * the vnic loopback test */
37 #define TEST_VNIC_SWITCHNO 1
38 #define TEST_VNIC_BUSNO 9
39
40 #define MAX_NAME_SIZE 128
41 #define MAX_IP_SIZE   50
42 #define MAXOUTSTANDINGCHANNELCOMMAND 256
43 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST   1
44 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
45
46 /* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
47  * we switch to slow polling mode.  As soon as we get a controlvm
48  * message, we switch back to fast polling mode.
49  */
50 #define MIN_IDLE_SECONDS 10
51 static ulong poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
52 static ulong most_recent_message_jiffies;       /* when we got our last
53                                                  * controlvm message */
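/* A minimal sketch (not taken verbatim from this file) of how the two poll
 * intervals above are typically toggled, assuming a periodic worker such as
 * controlvm_periodic_work() that runs on each delayed-work tick:
 *
 *	if (time_after(jiffies, most_recent_message_jiffies +
 *				(HZ * MIN_IDLE_SECONDS)))
 *		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
 *	else
 *		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
 */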
54 static inline char *
55 NONULLSTR(char *s)
56 {
57         if (s)
58                 return s;
59         return "";
60 }
61
62 static int serverregistered;
63 static int clientregistered;
64
65 #define MAX_CHIPSET_EVENTS 2
66 static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
67
68 static struct delayed_work periodic_controlvm_work;
69 static struct workqueue_struct *periodic_controlvm_workqueue;
70 static DEFINE_SEMAPHORE(notifier_lock);
71
72 static struct controlvm_message_header g_diag_msg_hdr;
73 static struct controlvm_message_header g_chipset_msg_hdr;
74 static struct controlvm_message_header g_del_dump_msg_hdr;
75 static const uuid_le spar_diag_pool_channel_protocol_uuid =
76         SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
77 /* 0xffffff is an invalid Bus/Device number */
78 static ulong g_diagpool_bus_no = 0xffffff;
79 static ulong g_diagpool_dev_no = 0xffffff;
80 static struct controlvm_message_packet g_devicechangestate_packet;
81
82 /* Only VNIC and VHBA channels are sent to visorclientbus (aka
83  * "visorhackbus")
84  */
85 #define FOR_VISORHACKBUS(channel_type_guid) \
86         (((uuid_le_cmp(channel_type_guid,\
87                        spar_vnic_channel_protocol_uuid) == 0) ||\
88         (uuid_le_cmp(channel_type_guid,\
89                         spar_vhba_channel_protocol_uuid) == 0)))
90 #define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
91
92 #define is_diagpool_channel(channel_type_guid) \
93         (uuid_le_cmp(channel_type_guid,\
94                      spar_diag_pool_channel_protocol_uuid) == 0)
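/* For illustration: FOR_VISORHACKBUS() evaluates to nonzero only for the VNIC
 * and VHBA channel-type GUIDs, and FOR_VISORBUS() is simply its negation.
 * my_device_create() below passes
 * FOR_VISORBUS(pDevInfo->chan_info.channel_type_uuid) as the "for_visorbus"
 * argument of device_epilog(), which selects between the server ("visorbus")
 * and client ("visorhackbus") notifier sets.
 */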
95
96 static LIST_HEAD(bus_info_list);
97 static LIST_HEAD(dev_info_list);
98
99 static struct visorchannel *controlvm_channel;
100
101 struct controlvm_payload_info {
102         u8 __iomem *ptr;        /* pointer to base address of payload pool */
103         u64 offset;             /* offset from beginning of controlvm
104                                  * channel to beginning of payload pool */
105         u32 bytes;              /* number of bytes in payload pool */
106 };
107
108 /* Manages the request payload in the controlvm channel */
109 static struct controlvm_payload_info ControlVm_payload_info;
110
111 struct livedump_info {
112         struct controlvm_message_header Dumpcapture_header;
113         struct controlvm_message_header Gettextdump_header;
114         struct controlvm_message_header Dumpcomplete_header;
115         BOOL Gettextdump_outstanding;
116         u32 crc32;
117         ulong length;
118         atomic_t buffers_in_use;
119         ulong destination;
120 };
121 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
122  * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
123  */
124 static struct livedump_info LiveDump_info;
125
126 /* The following globals are used to handle the scenario where we are unable to
127  * offload the payload from a controlvm message due to memory requirements.  In
128  * this scenario, we simply stash the controlvm message, then attempt to
129  * process it again the next time controlvm_periodic_work() runs.
130  */
131 static struct controlvm_message ControlVm_Pending_Msg;
132 static BOOL ControlVm_Pending_Msg_Valid = FALSE;
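/* A minimal sketch of the stash-and-retry pattern described above, assuming a
 * handle_command(inmsg, channel_addr) helper that returns FALSE when the
 * payload cannot be processed yet (the helper name is an assumption, not part
 * of this excerpt):
 *
 *	if (!handle_command(inmsg, channel_addr)) {
 *		ControlVm_Pending_Msg = inmsg;
 *		ControlVm_Pending_Msg_Valid = TRUE;
 *	}
 */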
133
134 /* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
135  * TRANSMIT_FILE PutFile payloads.
136  */
137 static struct kmem_cache *Putfile_buffer_list_pool;
138 static const char Putfile_buffer_list_pool_name[] =
139         "controlvm_putfile_buffer_list_pool";
140
141 /* This identifies a data buffer that has been received via a controlvm message
142  * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
143  */
144 struct putfile_buffer_entry {
145         struct list_head next;  /* putfile_buffer_entry list */
146         struct parser_context *parser_ctx; /* points to input data buffer */
147 };
148
149 /* List of struct putfile_request *, via next_putfile_request member.
150  * Each entry in this list identifies an outstanding TRANSMIT_FILE
151  * conversation.
152  */
153 static LIST_HEAD(Putfile_request_list);
154
155 /* This describes a buffer and its current state of transfer (e.g., how many
156  * bytes have already been supplied as putfile data, and how many bytes are
157  * remaining) for a putfile_request.
158  */
159 struct putfile_active_buffer {
160         /* a payload from a controlvm message, containing a file data buffer */
161         struct parser_context *parser_ctx;
162         /* points within data area of parser_ctx to next byte of data */
163         u8 *pnext;
164         /* # bytes left from <pnext> to the end of this data buffer */
165         size_t bytes_remaining;
166 };
167
168 #define PUTFILE_REQUEST_SIG 0x0906101302281211
169 /* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
170  * conversation.  Structs of this type are dynamically linked into
171  * <Putfile_request_list>.
172  */
173 struct putfile_request {
174         u64 sig;                /* PUTFILE_REQUEST_SIG */
175
176         /* header from original TransmitFile request */
177         struct controlvm_message_header controlvm_header;
178         u64 file_request_number;        /* from original TransmitFile request */
179
180         /* link to next struct putfile_request */
181         struct list_head next_putfile_request;
182
183         /* most-recent sequence number supplied via a controlvm message */
184         u64 data_sequence_number;
185
186         /* head of putfile_buffer_entry list, which describes the data to be
187          * supplied as putfile data;
188          * - entries are added when controlvm messages come in that supply
189          *   file data
190          * - entries are removed by the hotplug program that actually
191          *   consumes these buffers and writes them out as file data */
192         struct list_head input_buffer_list;
193         spinlock_t req_list_lock;       /* lock for input_buffer_list */
194
195         /* waiters for input_buffer_list to go non-empty */
196         wait_queue_head_t input_buffer_wq;
197
198         /* data not yet read within current putfile_buffer_entry */
199         struct putfile_active_buffer active_buf;
200
201         /* <0 = failed, 0 = in-progress, >0 = successful; */
202         /* note that this must be set while holding req_list_lock, and if you */
203         /* set it <0, it is your responsibility to also free up all of the */
204         /* other objects in this struct (like input_buffer_list, */
205         /* active_buf.parser_ctx) before releasing the lock */
206         int completion_status;
207 };
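/* A minimal consumer-side sketch for the structures above (assuming a single
 * consumer and a hypothetical get_putfile_buffer() helper, neither of which is
 * part of this excerpt): wait until input_buffer_list goes non-empty, then
 * detach the oldest buffer while holding req_list_lock.
 *
 *	static struct putfile_buffer_entry *
 *	get_putfile_buffer(struct putfile_request *req)
 *	{
 *		struct putfile_buffer_entry *entry;
 *
 *		if (wait_event_interruptible(req->input_buffer_wq,
 *					     !list_empty(&req->input_buffer_list)))
 *			return NULL;	// interrupted by a signal
 *		spin_lock(&req->req_list_lock);
 *		entry = list_first_entry(&req->input_buffer_list,
 *					 struct putfile_buffer_entry, next);
 *		list_del(&entry->next);
 *		spin_unlock(&req->req_list_lock);
 *		return entry;
 *	}
 */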
208
209 static atomic_t Visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
210
211 struct parahotplug_request {
212         struct list_head list;
213         int id;
214         unsigned long expiration;
215         struct controlvm_message msg;
216 };
217
218 static LIST_HEAD(Parahotplug_request_list);
219 static DEFINE_SPINLOCK(Parahotplug_request_list_lock);  /* lock for above */
220 static void parahotplug_process_list(void);
221
222 /* Sets of notifiers registered by the bus drivers via
223  * visorchipset_register_busdev_server() / visorchipset_register_busdev_client().
224  */
225 static struct visorchipset_busdev_notifiers BusDev_Server_Notifiers;
226 static struct visorchipset_busdev_notifiers BusDev_Client_Notifiers;
227
228 static void bus_create_response(ulong busNo, int response);
229 static void bus_destroy_response(ulong busNo, int response);
230 static void device_create_response(ulong busNo, ulong devNo, int response);
231 static void device_destroy_response(ulong busNo, ulong devNo, int response);
232 static void device_resume_response(ulong busNo, ulong devNo, int response);
233
234 static struct visorchipset_busdev_responders BusDev_Responders = {
235         .bus_create = bus_create_response,
236         .bus_destroy = bus_destroy_response,
237         .device_create = device_create_response,
238         .device_destroy = device_destroy_response,
239         .device_pause = visorchipset_device_pause_response,
240         .device_resume = device_resume_response,
241 };
242
243 /* info for /dev/visorchipset */
244 static dev_t MajorDev = -1; /* indicates major number for the device */
245
246 /* prototypes for attributes */
247 static ssize_t toolaction_show(struct device *dev,
248         struct device_attribute *attr, char *buf);
249 static ssize_t toolaction_store(struct device *dev,
250         struct device_attribute *attr, const char *buf, size_t count);
251 static DEVICE_ATTR_RW(toolaction);
252
253 static ssize_t boottotool_show(struct device *dev,
254         struct device_attribute *attr, char *buf);
255 static ssize_t boottotool_store(struct device *dev,
256         struct device_attribute *attr, const char *buf, size_t count);
257 static DEVICE_ATTR_RW(boottotool);
258
259 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
260         char *buf);
261 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
262         const char *buf, size_t count);
263 static DEVICE_ATTR_RW(error);
264
265 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
266         char *buf);
267 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
268         const char *buf, size_t count);
269 static DEVICE_ATTR_RW(textid);
270
271 static ssize_t remaining_steps_show(struct device *dev,
272         struct device_attribute *attr, char *buf);
273 static ssize_t remaining_steps_store(struct device *dev,
274         struct device_attribute *attr, const char *buf, size_t count);
275 static DEVICE_ATTR_RW(remaining_steps);
276
277 static ssize_t chipsetready_store(struct device *dev,
278                 struct device_attribute *attr, const char *buf, size_t count);
279 static DEVICE_ATTR_WO(chipsetready);
280
281 static ssize_t devicedisabled_store(struct device *dev,
282                 struct device_attribute *attr, const char *buf, size_t count);
283 static DEVICE_ATTR_WO(devicedisabled);
284
285 static ssize_t deviceenabled_store(struct device *dev,
286                 struct device_attribute *attr, const char *buf, size_t count);
287 static DEVICE_ATTR_WO(deviceenabled);
288
289 static struct attribute *visorchipset_install_attrs[] = {
290         &dev_attr_toolaction.attr,
291         &dev_attr_boottotool.attr,
292         &dev_attr_error.attr,
293         &dev_attr_textid.attr,
294         &dev_attr_remaining_steps.attr,
295         NULL
296 };
297
298 static struct attribute_group visorchipset_install_group = {
299         .name = "install",
300         .attrs = visorchipset_install_attrs
301 };
302
303 static struct attribute *visorchipset_guest_attrs[] = {
304         &dev_attr_chipsetready.attr,
305         NULL
306 };
307
308 static struct attribute_group visorchipset_guest_group = {
309         .name = "guest",
310         .attrs = visorchipset_guest_attrs
311 };
312
313 static struct attribute *visorchipset_parahotplug_attrs[] = {
314         &dev_attr_devicedisabled.attr,
315         &dev_attr_deviceenabled.attr,
316         NULL
317 };
318
319 static struct attribute_group visorchipset_parahotplug_group = {
320         .name = "parahotplug",
321         .attrs = visorchipset_parahotplug_attrs
322 };
323
324 static const struct attribute_group *visorchipset_dev_groups[] = {
325         &visorchipset_install_group,
326         &visorchipset_guest_group,
327         &visorchipset_parahotplug_group,
328         NULL
329 };
330
331 /* /sys/devices/platform/visorchipset */
332 static struct platform_device Visorchipset_platform_device = {
333         .name = "visorchipset",
334         .id = -1,
335         .dev.groups = visorchipset_dev_groups,
336 };
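/* With the attribute groups and platform device above registered, the
 * attributes appear under /sys/devices/platform/visorchipset/, one
 * subdirectory per named group, e.g.:
 *
 *	/sys/devices/platform/visorchipset/install/toolaction        (RW)
 *	/sys/devices/platform/visorchipset/guest/chipsetready        (WO)
 *	/sys/devices/platform/visorchipset/parahotplug/deviceenabled (WO)
 *
 * The install-group handlers below simply read or write the corresponding
 * field of the controlvm channel (see toolaction_show()/toolaction_store()).
 */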
337
338 /* Function prototypes */
339 static void controlvm_respond(struct controlvm_message_header *msgHdr,
340                               int response);
341 static void controlvm_respond_chipset_init(
342                 struct controlvm_message_header *msgHdr, int response,
343                 enum ultra_chipset_feature features);
344 static void controlvm_respond_physdev_changestate(
345                 struct controlvm_message_header *msgHdr, int response,
346                 struct spar_segment_state state);
347
348 static ssize_t toolaction_show(struct device *dev,
349                                struct device_attribute *attr,
350                                char *buf)
351 {
352         u8 toolAction;
353
354         visorchannel_read(controlvm_channel,
355                 offsetof(struct spar_controlvm_channel_protocol,
356                            tool_action), &toolAction, sizeof(u8));
357         return scnprintf(buf, PAGE_SIZE, "%u\n", toolAction);
358 }
359
360 static ssize_t toolaction_store(struct device *dev,
361                                 struct device_attribute *attr,
362                                 const char *buf, size_t count)
363 {
364         u8 toolAction;
365         int ret;
366
367         if (kstrtou8(buf, 10, &toolAction) != 0)
368                 return -EINVAL;
369
370         ret = visorchannel_write(controlvm_channel,
371                 offsetof(struct spar_controlvm_channel_protocol, tool_action),
372                 &toolAction, sizeof(u8));
373
374         if (ret)
375                 return ret;
376         return count;
377 }
378
379 static ssize_t boottotool_show(struct device *dev,
380                                struct device_attribute *attr,
381                                char *buf)
382 {
383         struct efi_spar_indication efiSparIndication;
384
385         visorchannel_read(controlvm_channel,
386                 offsetof(struct spar_controlvm_channel_protocol,
387                         efi_spar_ind), &efiSparIndication,
388                 sizeof(struct efi_spar_indication));
389         return scnprintf(buf, PAGE_SIZE, "%u\n",
390                         efiSparIndication.boot_to_tool);
391 }
392
393 static ssize_t boottotool_store(struct device *dev,
394                                 struct device_attribute *attr,
395                                 const char *buf, size_t count)
396 {
397         int val, ret;
398         struct efi_spar_indication efiSparIndication;
399
400         if (kstrtoint(buf, 10, &val) != 0)
401                 return -EINVAL;
402
403         efiSparIndication.boot_to_tool = val;
404         ret = visorchannel_write(controlvm_channel,
405                         offsetof(struct spar_controlvm_channel_protocol,
406                                 efi_spar_ind),
407                         &(efiSparIndication),
408                 sizeof(struct efi_spar_indication));
409
410         if (ret)
411                 return ret;
412         return count;
413 }
414
415 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
416                 char *buf)
417 {
418         u32 error;
419
420         visorchannel_read(controlvm_channel, offsetof(
421                 struct spar_controlvm_channel_protocol, installation_error),
422                 &error, sizeof(u32));
423         return scnprintf(buf, PAGE_SIZE, "%i\n", error);
424 }
425
426 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
427                 const char *buf, size_t count)
428 {
429         u32 error;
430         int ret;
431
432         if (kstrtou32(buf, 10, &error) != 0)
433                 return -EINVAL;
434
435         ret = visorchannel_write(controlvm_channel,
436                         offsetof(struct spar_controlvm_channel_protocol,
437                                 installation_error),
438                         &error, sizeof(u32));
439         if (ret)
440                 return ret;
441         return count;
442 }
443
444 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
445                 char *buf)
446 {
447         u32 textId;
448
449         visorchannel_read(controlvm_channel, offsetof(
450                 struct spar_controlvm_channel_protocol, installation_text_id),
451                 &textId, sizeof(u32));
452         return scnprintf(buf, PAGE_SIZE, "%i\n", textId);
453 }
454
455 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
456                 const char *buf, size_t count)
457 {
458         u32 textId;
459         int ret;
460
461         if (kstrtou32(buf, 10, &textId) != 0)
462                 return -EINVAL;
463
464         ret = visorchannel_write(controlvm_channel,
465                         offsetof(struct spar_controlvm_channel_protocol,
466                                 installation_text_id),
467                         &textId, sizeof(u32));
468         if (ret)
469                 return ret;
470         return count;
471 }
472
473
474 static ssize_t remaining_steps_show(struct device *dev,
475         struct device_attribute *attr, char *buf)
476 {
477         u16 remainingSteps;
478
479         visorchannel_read(controlvm_channel,
480                 offsetof(struct spar_controlvm_channel_protocol,
481                         installation_remaining_steps),
482                 &remainingSteps,
483                 sizeof(u16));
484         return scnprintf(buf, PAGE_SIZE, "%hu\n", remainingSteps);
485 }
486
487 static ssize_t remaining_steps_store(struct device *dev,
488         struct device_attribute *attr, const char *buf, size_t count)
489 {
490         u16 remainingSteps;
491         int ret;
492
493         if (kstrtou16(buf, 10, &remainingSteps) != 0)
494                 return -EINVAL;
495
496         ret = visorchannel_write(controlvm_channel,
497                         offsetof(struct spar_controlvm_channel_protocol,
498                                 installation_remaining_steps),
499                         &remainingSteps, sizeof(u16));
500         if (ret)
501                 return ret;
502         return count;
503 }
504
505 #if 0
506 static void
507 testUnicode(void)
508 {
509         wchar_t unicodeString[] = { 'a', 'b', 'c', 0 };
510         char s[sizeof(unicodeString) * NLS_MAX_CHARSET_SIZE];
511         wchar_t unicode2[99];
512         int chrs;
513         /* NOTE: Either due to a bug, or feature I don't understand, the
514          *       kernel utf8_mbstowcs() and utf8_wcstombs() do NOT copy the
515          *       trailing NUL byte!!   REALLY!!!!!    Arrrrgggghhhhh
516          */
517
518         LOGINF("sizeof(wchar_t) = %d", sizeof(wchar_t));
519         LOGINF("utf8_wcstombs=%d",
520                chrs = utf8_wcstombs(s, unicodeString, sizeof(s)));
521         if (chrs >= 0)
522                 s[chrs] = '\0'; /* GRRRRRRRR */
523         LOGINF("s='%s'", s);
524         LOGINF("utf8_mbstowcs=%d", chrs = utf8_mbstowcs(unicode2, s, 100));
525         if (chrs >= 0)
526                 unicode2[chrs] = 0;     /* GRRRRRRRR */
527         if (memcmp(unicodeString, unicode2, sizeof(unicodeString)) == 0)
528                 LOGINF("strings match... good");
529         else
530                 LOGINF("strings did not match!!");
531 }
532 #endif
533
534 static void
535 busInfo_clear(void *v)
536 {
537         struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);
538
539         if (p->proc_object) {
540                 visor_proc_DestroyObject(p->proc_object);
541                 p->proc_object = NULL;
542         }
543         kfree(p->name);
544         p->name = NULL;
545
546         kfree(p->description);
547         p->description = NULL;
548
549         p->state.created = 0;
550         memset(p, 0, sizeof(struct visorchipset_bus_info));
551 }
552
553 static void
554 devInfo_clear(void *v)
555 {
556         struct visorchipset_device_info *p =
557                         (struct visorchipset_device_info *)(v);
558
559         p->state.created = 0;
560         memset(p, 0, sizeof(struct visorchipset_device_info));
561 }
562
563 static u8
564 check_chipset_events(void)
565 {
566         int i;
567         u8 send_msg = 1;
568         /* Check events to determine if response should be sent */
569         for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
570                 send_msg &= chipset_events[i];
571         return send_msg;
572 }
573
574 static void
575 clear_chipset_events(void)
576 {
577         int i;
578         /* Clear chipset_events */
579         for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
580                 chipset_events[i] = 0;
581 }
582
583 void
584 visorchipset_register_busdev_server(
585                         struct visorchipset_busdev_notifiers *notifiers,
586                         struct visorchipset_busdev_responders *responders,
587                         struct ultra_vbus_deviceinfo *driver_info)
588 {
589         down(&notifier_lock);
590         if (notifiers == NULL) {
591                 memset(&BusDev_Server_Notifiers, 0,
592                        sizeof(BusDev_Server_Notifiers));
593                 serverregistered = 0;   /* clear flag */
594         } else {
595                 BusDev_Server_Notifiers = *notifiers;
596                 serverregistered = 1;   /* set flag */
597         }
598         if (responders)
599                 *responders = BusDev_Responders;
600         if (driver_info)
601                 bus_device_info_init(driver_info, "chipset", "visorchipset",
602                                    VERSION, NULL);
603
604         up(&notifier_lock);
605 }
606 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
607
608 void
609 visorchipset_register_busdev_client(
610                         struct visorchipset_busdev_notifiers *notifiers,
611                         struct visorchipset_busdev_responders *responders,
612                         struct ultra_vbus_deviceinfo *driver_info)
613 {
614         down(&notifier_lock);
615         if (notifiers == NULL) {
616                 memset(&BusDev_Client_Notifiers, 0,
617                        sizeof(BusDev_Client_Notifiers));
618                 clientregistered = 0;   /* clear flag */
619         } else {
620                 BusDev_Client_Notifiers = *notifiers;
621                 clientregistered = 1;   /* set flag */
622         }
623         if (responders)
624                 *responders = BusDev_Responders;
625         if (driver_info)
626                 bus_device_info_init(driver_info, "chipset(bolts)",
627                                      "visorchipset", VERSION, NULL);
628         up(&notifier_lock);
629 }
630 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
631
632 static void
633 cleanup_controlvm_structures(void)
634 {
635         struct visorchipset_bus_info *bi, *tmp_bi;
636         struct visorchipset_device_info *di, *tmp_di;
637
638         list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
639                 busInfo_clear(bi);
640                 list_del(&bi->entry);
641                 kfree(bi);
642         }
643
644         list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
645                 devInfo_clear(di);
646                 list_del(&di->entry);
647                 kfree(di);
648         }
649 }
650
651 static void
652 chipset_init(struct controlvm_message *inmsg)
653 {
654         static int chipset_inited;
655         enum ultra_chipset_feature features = 0;
656         int rc = CONTROLVM_RESP_SUCCESS;
657
658         POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
659         if (chipset_inited) {
660                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
661                 goto Away;
662         }
663         chipset_inited = 1;
664         POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
665
666         /* Set features to indicate we support parahotplug (if Command
667          * also supports it). */
668         features =
669             inmsg->cmd.init_chipset.
670             features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
671
672         /* Set the "reply" bit so Command knows this is a
673          * features-aware driver. */
674         features |= ULTRA_CHIPSET_FEATURE_REPLY;
675
676 Away:
677         if (rc < 0)
678                 cleanup_controlvm_structures();
679         if (inmsg->hdr.flags.response_expected)
680                 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
681 }
682
683 static void
684 controlvm_init_response(struct controlvm_message *msg,
685                         struct controlvm_message_header *msgHdr, int response)
686 {
687         memset(msg, 0, sizeof(struct controlvm_message));
688         memcpy(&msg->hdr, msgHdr, sizeof(struct controlvm_message_header));
689         msg->hdr.payload_bytes = 0;
690         msg->hdr.payload_vm_offset = 0;
691         msg->hdr.payload_max_bytes = 0;
692         if (response < 0) {
693                 msg->hdr.flags.failed = 1;
694                 msg->hdr.completion_status = (u32) (-response);
695         }
696 }
697
698 static void
699 controlvm_respond(struct controlvm_message_header *msgHdr, int response)
700 {
701         struct controlvm_message outmsg;
702
703         controlvm_init_response(&outmsg, msgHdr, response);
704         /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
705          * back the deviceChangeState structure in the packet. */
706         if (msgHdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
707             g_devicechangestate_packet.device_change_state.bus_no ==
708             g_diagpool_bus_no &&
709             g_devicechangestate_packet.device_change_state.dev_no ==
710             g_diagpool_dev_no)
711                 outmsg.cmd = g_devicechangestate_packet;
712         if (outmsg.hdr.flags.test_message == 1)
713                 return;
714
715         if (!visorchannel_signalinsert(controlvm_channel,
716                                        CONTROLVM_QUEUE_REQUEST, &outmsg)) {
717                 return;
718         }
719 }
720
721 static void
722 controlvm_respond_chipset_init(struct controlvm_message_header *msgHdr,
723                                int response,
724                                enum ultra_chipset_feature features)
725 {
726         struct controlvm_message outmsg;
727
728         controlvm_init_response(&outmsg, msgHdr, response);
729         outmsg.cmd.init_chipset.features = features;
730         if (!visorchannel_signalinsert(controlvm_channel,
731                                        CONTROLVM_QUEUE_REQUEST, &outmsg)) {
732                 return;
733         }
734 }
735
736 static void controlvm_respond_physdev_changestate(
737                 struct controlvm_message_header *msgHdr, int response,
738                 struct spar_segment_state state)
739 {
740         struct controlvm_message outmsg;
741
742         controlvm_init_response(&outmsg, msgHdr, response);
743         outmsg.cmd.device_change_state.state = state;
744         outmsg.cmd.device_change_state.flags.phys_device = 1;
745         if (!visorchannel_signalinsert(controlvm_channel,
746                                        CONTROLVM_QUEUE_REQUEST, &outmsg)) {
747                 return;
748         }
749 }
750
751 void
752 visorchipset_save_message(struct controlvm_message *msg,
753                           enum crash_obj_type type)
754 {
755         u32 localSavedCrashMsgOffset;
756         u16 localSavedCrashMsgCount;
757
758         /* get saved message count */
759         if (visorchannel_read(controlvm_channel,
760                               offsetof(struct spar_controlvm_channel_protocol,
761                                        saved_crash_message_count),
762                               &localSavedCrashMsgCount, sizeof(u16)) < 0) {
763                 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
764                                  POSTCODE_SEVERITY_ERR);
765                 return;
766         }
767
768         if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
769                 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
770                                  localSavedCrashMsgCount,
771                                  POSTCODE_SEVERITY_ERR);
772                 return;
773         }
774
775         /* get saved crash message offset */
776         if (visorchannel_read(controlvm_channel,
777                               offsetof(struct spar_controlvm_channel_protocol,
778                                        saved_crash_message_offset),
779                               &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
780                 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
781                                  POSTCODE_SEVERITY_ERR);
782                 return;
783         }
784
785         if (type == CRASH_BUS) {
786                 if (visorchannel_write(controlvm_channel,
787                                        localSavedCrashMsgOffset,
788                                        msg,
789                                        sizeof(struct controlvm_message)) < 0) {
790                         POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
791                                          POSTCODE_SEVERITY_ERR);
792                         return;
793                 }
794         } else {
795                 if (visorchannel_write(controlvm_channel,
796                                        localSavedCrashMsgOffset +
797                                        sizeof(struct controlvm_message), msg,
798                                        sizeof(struct controlvm_message)) < 0) {
799                         POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
800                                          POSTCODE_SEVERITY_ERR);
801                         return;
802                 }
803         }
804 }
805 EXPORT_SYMBOL_GPL(visorchipset_save_message);
806
807 static void
808 bus_responder(enum controlvm_id cmdId, ulong busNo, int response)
809 {
810         struct visorchipset_bus_info *p = NULL;
811         BOOL need_clear = FALSE;
812
813         p = findbus(&bus_info_list, busNo);
814         if (!p)
815                 return;
816
817         if (response < 0) {
818                 if ((cmdId == CONTROLVM_BUS_CREATE) &&
819                     (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
820                         /* undo the row we just created... */
821                         delbusdevices(&dev_info_list, busNo);
822         } else {
823                 if (cmdId == CONTROLVM_BUS_CREATE)
824                         p->state.created = 1;
825                 if (cmdId == CONTROLVM_BUS_DESTROY)
826                         need_clear = TRUE;
827         }
828
829         if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
830                 return;         /* no controlvm response needed */
831         if (p->pending_msg_hdr.id != (u32) cmdId)
832                 return;
833         controlvm_respond(&p->pending_msg_hdr, response);
834         p->pending_msg_hdr.id = CONTROLVM_INVALID;
835         if (need_clear) {
836                 busInfo_clear(p);
837                 delbusdevices(&dev_info_list, busNo);
838         }
839 }
840
841 static void
842 device_changestate_responder(enum controlvm_id cmdId,
843                              ulong busNo, ulong devNo, int response,
844                              struct spar_segment_state responseState)
845 {
846         struct visorchipset_device_info *p = NULL;
847         struct controlvm_message outmsg;
848
849         p = finddevice(&dev_info_list, busNo, devNo);
850         if (!p)
851                 return;
852         if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
853                 return;         /* no controlvm response needed */
854         if (p->pending_msg_hdr.id != cmdId)
855                 return;
856
857         controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
858
859         outmsg.cmd.device_change_state.bus_no = busNo;
860         outmsg.cmd.device_change_state.dev_no = devNo;
861         outmsg.cmd.device_change_state.state = responseState;
862
863         if (!visorchannel_signalinsert(controlvm_channel,
864                                        CONTROLVM_QUEUE_REQUEST, &outmsg))
865                 return;
866
867         p->pending_msg_hdr.id = CONTROLVM_INVALID;
868 }
869
870 static void
871 device_responder(enum controlvm_id cmdId, ulong busNo, ulong devNo,
872                  int response)
873 {
874         struct visorchipset_device_info *p = NULL;
875         BOOL need_clear = FALSE;
876
877         p = finddevice(&dev_info_list, busNo, devNo);
878         if (!p)
879                 return;
880         if (response >= 0) {
881                 if (cmdId == CONTROLVM_DEVICE_CREATE)
882                         p->state.created = 1;
883                 if (cmdId == CONTROLVM_DEVICE_DESTROY)
884                         need_clear = TRUE;
885         }
886
887         if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
888                 return;         /* no controlvm response needed */
889
890         if (p->pending_msg_hdr.id != (u32) cmdId)
891                 return;
892
893         controlvm_respond(&p->pending_msg_hdr, response);
894         p->pending_msg_hdr.id = CONTROLVM_INVALID;
895         if (need_clear)
896                 devInfo_clear(p);
897 }
898
899 static void
900 bus_epilog(u32 busNo,
901            u32 cmd, struct controlvm_message_header *msgHdr,
902            int response, BOOL needResponse)
903 {
904         BOOL notified = FALSE;
905
906         struct visorchipset_bus_info *pBusInfo = findbus(&bus_info_list, busNo);
907
908         if (!pBusInfo)
909                 return;
910
911         if (needResponse) {
912                 memcpy(&pBusInfo->pending_msg_hdr, msgHdr,
913                        sizeof(struct controlvm_message_header));
914         } else
915                 pBusInfo->pending_msg_hdr.id = CONTROLVM_INVALID;
916
917         down(&notifier_lock);
918         if (response == CONTROLVM_RESP_SUCCESS) {
919                 switch (cmd) {
920                 case CONTROLVM_BUS_CREATE:
921                         /* We can't tell from the bus_create
922                          * information which of our two bus flavors the
923                          * devices on this bus will ultimately end up on.
924                          * Fortunately, it turns out to be harmless to
925                          * send the bus_create to both of them.  We can
926                          * narrow things down a little bit, though,
927                          * because we know:
928                          * - BusDev_Server can handle either server or
929                          *   client devices
930                          * - BusDev_Client can handle ONLY client devices */
931                         if (BusDev_Server_Notifiers.bus_create) {
932                                 (*BusDev_Server_Notifiers.bus_create) (busNo);
933                                 notified = TRUE;
934                         }
935                         if ((!pBusInfo->flags.server) /*client */ &&
936                             BusDev_Client_Notifiers.bus_create) {
937                                 (*BusDev_Client_Notifiers.bus_create) (busNo);
938                                 notified = TRUE;
939                         }
940                         break;
941                 case CONTROLVM_BUS_DESTROY:
942                         if (BusDev_Server_Notifiers.bus_destroy) {
943                                 (*BusDev_Server_Notifiers.bus_destroy) (busNo);
944                                 notified = TRUE;
945                         }
946                         if ((!pBusInfo->flags.server) /*client */ &&
947                             BusDev_Client_Notifiers.bus_destroy) {
948                                 (*BusDev_Client_Notifiers.bus_destroy) (busNo);
949                                 notified = TRUE;
950                         }
951                         break;
952                 }
953         }
954         if (notified)
955                 /* The callback function just called above is responsible
956                  * for calling the appropriate visorchipset_busdev_responders
957                  * function, which will call bus_responder()
958                  */
959                 ;
960         else
961                 bus_responder(cmd, busNo, response);
962         up(&notifier_lock);
963 }
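/* To make the contract above concrete: when a bus_create/bus_destroy notifier
 * runs, it is expected to finish by invoking the matching entry of
 * BusDev_Responders (handed out via visorchipset_register_busdev_server() /
 * _client()), e.g. bus_create_response(), which in turn is expected to call
 * bus_responder(CONTROLVM_BUS_CREATE, busNo, response) to emit the controlvm
 * reply.  Only when no notifier was invoked does bus_epilog() call
 * bus_responder() directly.
 */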
964
965 static void
966 device_epilog(u32 busNo, u32 devNo, struct spar_segment_state state, u32 cmd,
967               struct controlvm_message_header *msgHdr, int response,
968               BOOL needResponse, BOOL for_visorbus)
969 {
970         struct visorchipset_busdev_notifiers *notifiers = NULL;
971         BOOL notified = FALSE;
972
973         struct visorchipset_device_info *pDevInfo =
974                 finddevice(&dev_info_list, busNo, devNo);
975         char *envp[] = {
976                 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
977                 NULL
978         };
979
980         if (!pDevInfo)
981                 return;
982
983         if (for_visorbus)
984                 notifiers = &BusDev_Server_Notifiers;
985         else
986                 notifiers = &BusDev_Client_Notifiers;
987         if (needResponse) {
988                 memcpy(&pDevInfo->pending_msg_hdr, msgHdr,
989                        sizeof(struct controlvm_message_header));
990         } else
991                 pDevInfo->pending_msg_hdr.id = CONTROLVM_INVALID;
992
993         down(&notifier_lock);
994         if (response >= 0) {
995                 switch (cmd) {
996                 case CONTROLVM_DEVICE_CREATE:
997                         if (notifiers->device_create) {
998                                 (*notifiers->device_create) (busNo, devNo);
999                                 notified = TRUE;
1000                         }
1001                         break;
1002                 case CONTROLVM_DEVICE_CHANGESTATE:
1003                         /* ServerReady / ServerRunning / SegmentStateRunning */
1004                         if (state.alive == segment_state_running.alive &&
1005                             state.operating ==
1006                                 segment_state_running.operating) {
1007                                 if (notifiers->device_resume) {
1008                                         (*notifiers->device_resume) (busNo,
1009                                                                      devNo);
1010                                         notified = TRUE;
1011                                 }
1012                         }
1013                         /* ServerNotReady / ServerLost / SegmentStateStandby */
1014                         else if (state.alive == segment_state_standby.alive &&
1015                                  state.operating ==
1016                                  segment_state_standby.operating) {
1017                                 /* technically this is the standby case,
1018                                  * where the server is lost
1019                                  */
1020                                 if (notifiers->device_pause) {
1021                                         (*notifiers->device_pause) (busNo,
1022                                                                     devNo);
1023                                         notified = TRUE;
1024                                 }
1025                         } else if (state.alive == segment_state_paused.alive &&
1026                                    state.operating ==
1027                                    segment_state_paused.operating) {
1028                                 /* this is a "lite" pause, where the channel
1029                                  * is still valid; the device is merely paused
1030                                  */
1031                                 if (busNo == g_diagpool_bus_no &&
1032                                     devNo == g_diagpool_dev_no) {
1033                                         /* this will trigger the
1034                                          * diag_shutdown.sh script in
1035                                          * the visorchipset hotplug */
1036                                         kobject_uevent_env
1037                                             (&Visorchipset_platform_device.dev.
1038                                              kobj, KOBJ_ONLINE, envp);
1039                                 }
1040                         }
1041                         break;
1042                 case CONTROLVM_DEVICE_DESTROY:
1043                         if (notifiers->device_destroy) {
1044                                 (*notifiers->device_destroy) (busNo, devNo);
1045                                 notified = TRUE;
1046                         }
1047                         break;
1048                 }
1049         }
1050         if (notified)
1051                 /* The callback function just called above is responsible
1052                  * for calling the appropriate visorchipset_busdev_responders
1053                  * function, which will call device_responder()
1054                  */
1055                 ;
1056         else
1057                 device_responder(cmd, busNo, devNo, response);
1058         up(&notifier_lock);
1059 }
1060
1061 static void
1062 bus_create(struct controlvm_message *inmsg)
1063 {
1064         struct controlvm_message_packet *cmd = &inmsg->cmd;
1065         ulong busNo = cmd->create_bus.bus_no;
1066         int rc = CONTROLVM_RESP_SUCCESS;
1067         struct visorchipset_bus_info *pBusInfo = NULL;
1068
1069
1070         pBusInfo = findbus(&bus_info_list, busNo);
1071         if (pBusInfo && (pBusInfo->state.created == 1)) {
1072                 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1073                                  POSTCODE_SEVERITY_ERR);
1074                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1075                 goto Away;
1076         }
1077         pBusInfo = kzalloc(sizeof(struct visorchipset_bus_info), GFP_KERNEL);
1078         if (pBusInfo == NULL) {
1079                 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1080                                  POSTCODE_SEVERITY_ERR);
1081                 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1082                 goto Away;
1083         }
1084
1085         INIT_LIST_HEAD(&pBusInfo->entry);
1086         pBusInfo->bus_no = busNo;
1087         pBusInfo->dev_no = cmd->create_bus.dev_count;
1088
1089         POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1090
1091         if (inmsg->hdr.flags.test_message == 1)
1092                 pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1093         else
1094                 pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1095
1096         pBusInfo->flags.server = inmsg->hdr.flags.server;
1097         pBusInfo->chan_info.channel_addr = cmd->create_bus.channel_addr;
1098         pBusInfo->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1099         pBusInfo->chan_info.channel_type_uuid =
1100                         cmd->create_bus.bus_data_type_uuid;
1101         pBusInfo->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
1102
1103         list_add(&pBusInfo->entry, &bus_info_list);
1104
1105         POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1106
1107 Away:
1108         bus_epilog(busNo, CONTROLVM_BUS_CREATE, &inmsg->hdr,
1109                    rc, inmsg->hdr.flags.response_expected == 1);
1110 }
1111
1112 static void
1113 bus_destroy(struct controlvm_message *inmsg)
1114 {
1115         struct controlvm_message_packet *cmd = &inmsg->cmd;
1116         ulong busNo = cmd->destroy_bus.bus_no;
1117         struct visorchipset_bus_info *pBusInfo;
1118         int rc = CONTROLVM_RESP_SUCCESS;
1119
1120         pBusInfo = findbus(&bus_info_list, busNo);
1121         if (!pBusInfo) {
1122                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1123                 goto Away;
1124         }
1125         if (pBusInfo->state.created == 0) {
1126                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1127                 goto Away;
1128         }
1129
1130 Away:
1131         bus_epilog(busNo, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
1132                    rc, inmsg->hdr.flags.response_expected == 1);
1133 }
1134
1135 static void
1136 bus_configure(struct controlvm_message *inmsg,
1137               struct parser_context *parser_ctx)
1138 {
1139         struct controlvm_message_packet *cmd = &inmsg->cmd;
1140         ulong busNo = cmd->configure_bus.bus_no;
1141         struct visorchipset_bus_info *pBusInfo = NULL;
1142         int rc = CONTROLVM_RESP_SUCCESS;
1143         char s[99];
1144
1145         busNo = cmd->configure_bus.bus_no;
1146         POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1147
1148         pBusInfo = findbus(&bus_info_list, busNo);
1149         if (!pBusInfo) {
1150                 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1151                                  POSTCODE_SEVERITY_ERR);
1152                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1153                 goto Away;
1154         }
1155         if (pBusInfo->state.created == 0) {
1156                 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1157                                  POSTCODE_SEVERITY_ERR);
1158                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1159                 goto Away;
1160         }
1161         /* TBD - add this check to other commands also... */
1162         if (pBusInfo->pending_msg_hdr.id != CONTROLVM_INVALID) {
1163                 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1164                                  POSTCODE_SEVERITY_ERR);
1165                 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1166                 goto Away;
1167         }
1168
1169         pBusInfo->partition_handle = cmd->configure_bus.guest_handle;
1170         pBusInfo->partition_uuid = parser_id_get(parser_ctx);
1171         parser_param_start(parser_ctx, PARSERSTRING_NAME);
1172         pBusInfo->name = parser_string_get(parser_ctx);
1173
1174         visorchannel_uuid_id(&pBusInfo->partition_uuid, s);
1175         POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1176 Away:
1177         bus_epilog(busNo, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
1178                    rc, inmsg->hdr.flags.response_expected == 1);
1179 }
1180
1181 static void
1182 my_device_create(struct controlvm_message *inmsg)
1183 {
1184         struct controlvm_message_packet *cmd = &inmsg->cmd;
1185         ulong busNo = cmd->create_device.bus_no;
1186         ulong devNo = cmd->create_device.dev_no;
1187         struct visorchipset_device_info *pDevInfo = NULL;
1188         struct visorchipset_bus_info *pBusInfo = NULL;
1189         int rc = CONTROLVM_RESP_SUCCESS;
1190
1191         pDevInfo = finddevice(&dev_info_list, busNo, devNo);
1192         if (pDevInfo && (pDevInfo->state.created == 1)) {
1193                 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1194                                  POSTCODE_SEVERITY_ERR);
1195                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1196                 goto Away;
1197         }
1198         pBusInfo = findbus(&bus_info_list, busNo);
1199         if (!pBusInfo) {
1200                 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1201                                  POSTCODE_SEVERITY_ERR);
1202                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1203                 goto Away;
1204         }
1205         if (pBusInfo->state.created == 0) {
1206                 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1207                                  POSTCODE_SEVERITY_ERR);
1208                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1209                 goto Away;
1210         }
1211         pDevInfo = kzalloc(sizeof(struct visorchipset_device_info), GFP_KERNEL);
1212         if (pDevInfo == NULL) {
1213                 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1214                                  POSTCODE_SEVERITY_ERR);
1215                 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1216                 goto Away;
1217         }
1218
1219         INIT_LIST_HEAD(&pDevInfo->entry);
1220         pDevInfo->bus_no = busNo;
1221         pDevInfo->dev_no = devNo;
1222         pDevInfo->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1223         POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
1224                          POSTCODE_SEVERITY_INFO);
1225
1226         if (inmsg->hdr.flags.test_message == 1)
1227                 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1228         else
1229                 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1230         pDevInfo->chan_info.channel_addr = cmd->create_device.channel_addr;
1231         pDevInfo->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1232         pDevInfo->chan_info.channel_type_uuid =
1233                         cmd->create_device.data_type_uuid;
1234         pDevInfo->chan_info.intr = cmd->create_device.intr;
1235         list_add(&pDevInfo->entry, &dev_info_list);
1236         POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, devNo, busNo,
1237                          POSTCODE_SEVERITY_INFO);
1238 Away:
1239         /* get the bus and devNo for DiagPool channel */
1240         if (pDevInfo &&
1241             is_diagpool_channel(pDevInfo->chan_info.channel_type_uuid)) {
1242                 g_diagpool_bus_no = busNo;
1243                 g_diagpool_dev_no = devNo;
1244         }
1245         device_epilog(busNo, devNo, segment_state_running,
1246                       CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
1247                       inmsg->hdr.flags.response_expected == 1,
1248                       !pDevInfo || FOR_VISORBUS(pDevInfo->chan_info.channel_type_uuid));
1249 }
1250
1251 static void
1252 my_device_changestate(struct controlvm_message *inmsg)
1253 {
1254         struct controlvm_message_packet *cmd = &inmsg->cmd;
1255         ulong busNo = cmd->device_change_state.bus_no;
1256         ulong devNo = cmd->device_change_state.dev_no;
1257         struct spar_segment_state state = cmd->device_change_state.state;
1258         struct visorchipset_device_info *pDevInfo = NULL;
1259         int rc = CONTROLVM_RESP_SUCCESS;
1260
1261         pDevInfo = finddevice(&dev_info_list, busNo, devNo);
1262         if (!pDevInfo) {
1263                 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1264                                  POSTCODE_SEVERITY_ERR);
1265                 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1266                 goto Away;
1267         }
1268         if (pDevInfo->state.created == 0) {
1269                 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1270                                  POSTCODE_SEVERITY_ERR);
1271                 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1272         }
1273 Away:
1274         if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1275                 device_epilog(busNo, devNo, state, CONTROLVM_DEVICE_CHANGESTATE,
1276                               &inmsg->hdr, rc,
1277                               inmsg->hdr.flags.response_expected == 1,
1278                               FOR_VISORBUS(
1279                                         pDevInfo->chan_info.channel_type_uuid));
1280 }
1281
1282 static void
1283 my_device_destroy(struct controlvm_message *inmsg)
1284 {
1285         struct controlvm_message_packet *cmd = &inmsg->cmd;
1286         ulong busNo = cmd->destroy_device.bus_no;
1287         ulong devNo = cmd->destroy_device.dev_no;
1288         struct visorchipset_device_info *pDevInfo = NULL;
1289         int rc = CONTROLVM_RESP_SUCCESS;
1290
1291         pDevInfo = finddevice(&dev_info_list, busNo, devNo);
1292         if (!pDevInfo) {
1293                 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1294                 goto Away;
1295         }
1296         if (pDevInfo->state.created == 0) {
1297                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1298         }
1299
1300 Away:
1301         if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1302                 device_epilog(busNo, devNo, segment_state_running,
1303                               CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
1304                               inmsg->hdr.flags.response_expected == 1,
1305                               FOR_VISORBUS(
1306                                         pDevInfo->chan_info.channel_type_uuid));
1307 }
1308
1309 /* When provided with the physical address of the controlvm channel
1310  * (phys_addr), the offset to the payload area we need to manage
1311  * (offset), and the size of this payload area (bytes), fills in the
1312  * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS on
1313  * success, or a negative CONTROLVM_RESP_ERROR_* code on failure.
1314  */
1315 static int
1316 initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
1317                                   struct controlvm_payload_info *info)
1318 {
1319         u8 __iomem *payload = NULL;
1320         int rc = CONTROLVM_RESP_SUCCESS;
1321
1322         if (info == NULL) {
1323                 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1324                 goto Away;
1325         }
1326         memset(info, 0, sizeof(struct controlvm_payload_info));
1327         if ((offset == 0) || (bytes == 0)) {
1328                 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1329                 goto Away;
1330         }
1331         payload = ioremap_cache(phys_addr + offset, bytes);
1332         if (payload == NULL) {
1333                 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1334                 goto Away;
1335         }
1336
1337         info->offset = offset;
1338         info->bytes = bytes;
1339         info->ptr = payload;
1340
1341 Away:
1342         if (rc < 0) {
1343                 if (payload != NULL) {
1344                         iounmap(payload);
1345                         payload = NULL;
1346                 }
1347         }
1348         return rc;
1349 }
1350
1351 static void
1352 destroy_controlvm_payload_info(struct controlvm_payload_info *info)
1353 {
1354         if (info->ptr != NULL) {
1355                 iounmap(info->ptr);
1356                 info->ptr = NULL;
1357         }
1358         memset(info, 0, sizeof(struct controlvm_payload_info));
1359 }
1360
1361 static void
1362 initialize_controlvm_payload(void)
1363 {
1364         HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
1365         u64 payloadOffset = 0;
1366         u32 payloadBytes = 0;
1367
1368         if (visorchannel_read(controlvm_channel,
1369                               offsetof(struct spar_controlvm_channel_protocol,
1370                                        request_payload_offset),
1371                               &payloadOffset, sizeof(payloadOffset)) < 0) {
1372                 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1373                                  POSTCODE_SEVERITY_ERR);
1374                 return;
1375         }
1376         if (visorchannel_read(controlvm_channel,
1377                               offsetof(struct spar_controlvm_channel_protocol,
1378                                        request_payload_bytes),
1379                               &payloadBytes, sizeof(payloadBytes)) < 0) {
1380                 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1381                                  POSTCODE_SEVERITY_ERR);
1382                 return;
1383         }
1384         initialize_controlvm_payload_info(phys_addr,
1385                                           payloadOffset, payloadBytes,
1386                                           &ControlVm_payload_info);
1387 }
1388
1389 /*  Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1390  *  Returns CONTROLVM_RESP_xxx code.
1391  */
1392 int
1393 visorchipset_chipset_ready(void)
1394 {
1395         kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
1396         return CONTROLVM_RESP_SUCCESS;
1397 }
1398 EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
1399
1400 int
1401 visorchipset_chipset_selftest(void)
1402 {
1403         char env_selftest[20];
1404         char *envp[] = { env_selftest, NULL };
1405
1406         sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1407         kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1408                            envp);
1409         return CONTROLVM_RESP_SUCCESS;
1410 }
1411 EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1412
1413 /*  Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1414  *  Returns CONTROLVM_RESP_xxx code.
1415  */
1416 int
1417 visorchipset_chipset_notready(void)
1418 {
1419         kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1420         return CONTROLVM_RESP_SUCCESS;
1421 }
1422 EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
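
/*
 * Illustration only -- nothing below ships with this driver.  A udev rule
 * set reacting to the ONLINE/OFFLINE/selftest uevents above might look
 * roughly like this (rule file name and script paths are hypothetical):
 *
 *   # /etc/udev/rules.d/99-spar-chipset.rules
 *   ACTION=="online",  DEVPATH=="/devices/platform/visorchipset", RUN+="/sbin/spar_online.sh"
 *   ACTION=="offline", DEVPATH=="/devices/platform/visorchipset", RUN+="/sbin/spar_offline.sh"
 *   ACTION=="change",  ENV{SPARSP_SELFTEST}=="1", RUN+="/sbin/spar_selftest.sh"
 */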
1423
1424 static void
1425 chipset_ready(struct controlvm_message_header *msgHdr)
1426 {
1427         int rc = visorchipset_chipset_ready();
1428
1429         if (rc != CONTROLVM_RESP_SUCCESS)
1430                 rc = -rc;
1431         if (msgHdr->flags.response_expected && !visorchipset_holdchipsetready)
1432                 controlvm_respond(msgHdr, rc);
1433         if (msgHdr->flags.response_expected && visorchipset_holdchipsetready) {
1434                 /* Send CHIPSET_READY response when all modules have been loaded
1435                  * and disks mounted for the partition
1436                  */
1437                 g_chipset_msg_hdr = *msgHdr;
1438         }
1439 }
1440
1441 static void
1442 chipset_selftest(struct controlvm_message_header *msgHdr)
1443 {
1444         int rc = visorchipset_chipset_selftest();
1445
1446         if (rc != CONTROLVM_RESP_SUCCESS)
1447                 rc = -rc;
1448         if (msgHdr->flags.response_expected)
1449                 controlvm_respond(msgHdr, rc);
1450 }
1451
1452 static void
1453 chipset_notready(struct controlvm_message_header *msgHdr)
1454 {
1455         int rc = visorchipset_chipset_notready();
1456
1457         if (rc != CONTROLVM_RESP_SUCCESS)
1458                 rc = -rc;
1459         if (msgHdr->flags.response_expected)
1460                 controlvm_respond(msgHdr, rc);
1461 }
1462
1463 /* This is your "one-stop" shop for grabbing the next message from the
1464  * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1465  */
1466 static BOOL
1467 read_controlvm_event(struct controlvm_message *msg)
1468 {
1469         if (visorchannel_signalremove(controlvm_channel,
1470                                       CONTROLVM_QUEUE_EVENT, msg)) {
1471                 /* got a message */
1472                 if (msg->hdr.flags.test_message == 1)
1473                         return FALSE;
1474                 return TRUE;
1475         }
1476         return FALSE;
1477 }
1478
1479 /*
1480  * The general parahotplug flow works as follows.  The visorchipset
1481  * driver receives a DEVICE_CHANGESTATE message from Command
1482  * specifying a physical device to enable or disable.  The CONTROLVM
1483  * message handler calls parahotplug_process_message, which then adds
1484  * the message to a global list and kicks off a udev event which
1485  * causes a user level script to enable or disable the specified
1486  * device.  The udev script then writes the request id back through the
1487  * parahotplug/deviceenabled (or parahotplug/devicedisabled) interface,
1488  * which causes deviceenabled_store()/devicedisabled_store() below to be
1489  * called; at that point the appropriate CONTROLVM message is retrieved
1490  * from the list and responded to.
1490  */
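
/*
 * Round-trip illustration (all concrete values below are hypothetical):
 *
 *   1. parahotplug_request_kickoff() emits a KOBJ_CHANGE uevent carrying
 *      an environment such as:
 *        SPAR_PARAHOTPLUG=1
 *        SPAR_PARAHOTPLUG_ID=42
 *        SPAR_PARAHOTPLUG_STATE=0
 *        SPAR_PARAHOTPLUG_BUS=3
 *        SPAR_PARAHOTPLUG_DEVICE=5
 *        SPAR_PARAHOTPLUG_FUNCTION=0
 *   2. The user-level script disables (or enables) the physical device
 *      and echoes the id back, e.g.:
 *        echo 42 > .../parahotplug/devicedisabled
 *      (the exact sysfs path is not shown in this file)
 *   3. devicedisabled_store()/deviceenabled_store() below call
 *      parahotplug_request_complete(), which responds to the stashed
 *      CONTROLVM message and frees the request.
 */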
1491
1492 #define PARAHOTPLUG_TIMEOUT_MS 2000
1493
1494 /*
1495  * Generate unique int to match an outstanding CONTROLVM message with a
1496  * udev script /proc response
1497  */
1498 static int
1499 parahotplug_next_id(void)
1500 {
1501         static atomic_t id = ATOMIC_INIT(0);
1502
1503         return atomic_inc_return(&id);
1504 }
1505
1506 /*
1507  * Returns the time (in jiffies) when a CONTROLVM message on the list
1508  * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1509  */
1510 static unsigned long
1511 parahotplug_next_expiration(void)
1512 {
1513         return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1514 }
1515
1516 /*
1517  * Create a parahotplug_request, which is basically a wrapper for a
1518  * CONTROLVM_MESSAGE that we can stick on a list
1519  */
1520 static struct parahotplug_request *
1521 parahotplug_request_create(struct controlvm_message *msg)
1522 {
1523         struct parahotplug_request *req;
1524
1525         req = kmalloc(sizeof(*req), GFP_KERNEL|__GFP_NORETRY);
1526         if (req == NULL)
1527                 return NULL;
1528
1529         req->id = parahotplug_next_id();
1530         req->expiration = parahotplug_next_expiration();
1531         req->msg = *msg;
1532
1533         return req;
1534 }
1535
1536 /*
1537  * Free a parahotplug_request.
1538  */
1539 static void
1540 parahotplug_request_destroy(struct parahotplug_request *req)
1541 {
1542         kfree(req);
1543 }
1544
1545 /*
1546  * Cause uevent to run the user level script to do the disable/enable
1547  * specified in (the CONTROLVM message in) the specified
1548  * parahotplug_request
1549  */
1550 static void
1551 parahotplug_request_kickoff(struct parahotplug_request *req)
1552 {
1553         struct controlvm_message_packet *cmd = &req->msg.cmd;
1554         char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1555             env_func[40];
1556         char *envp[] = {
1557                 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1558         };
1559
1560         sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1561         sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1562         sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1563                 cmd->device_change_state.state.active);
1564         sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1565                 cmd->device_change_state.bus_no);
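        /*
         * dev_no apparently packs the PCI device number in its upper bits
         * and the function number in its low three bits (devfn-style
         * encoding), hence the shift and mask below.
         */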
1566         sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1567                 cmd->device_change_state.dev_no >> 3);
1568         sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1569                 cmd->device_change_state.dev_no & 0x7);
1570
1571         kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1572                            envp);
1573 }
1574
1575 /*
1576  * Remove any request from the list that's been on there too long and
1577  * respond with an error.
1578  */
1579 static void
1580 parahotplug_process_list(void)
1581 {
1582         struct list_head *pos = NULL;
1583         struct list_head *tmp = NULL;
1584
1585         spin_lock(&Parahotplug_request_list_lock);
1586
1587         list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1588                 struct parahotplug_request *req =
1589                     list_entry(pos, struct parahotplug_request, list);
1590                 if (time_after_eq(jiffies, req->expiration)) {
1591                         list_del(pos);
1592                         if (req->msg.hdr.flags.response_expected)
1593                                 controlvm_respond_physdev_changestate(
1594                                         &req->msg.hdr,
1595                                         CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1596                                         req->msg.cmd.device_change_state.state);
1597                         parahotplug_request_destroy(req);
1598                 }
1599         }
1600
1601         spin_unlock(&Parahotplug_request_list_lock);
1602 }
1603
1604 /*
1605  * Called from the /proc handler, which means the user script has
1606  * finished the enable/disable.  Find the matching identifier, and
1607  * respond to the CONTROLVM message with success.
1608  */
1609 static int
1610 parahotplug_request_complete(int id, u16 active)
1611 {
1612         struct list_head *pos = NULL;
1613         struct list_head *tmp = NULL;
1614
1615         spin_lock(&Parahotplug_request_list_lock);
1616
1617         /* Look for a request matching "id". */
1618         list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1619                 struct parahotplug_request *req =
1620                     list_entry(pos, struct parahotplug_request, list);
1621                 if (req->id == id) {
1622                         /* Found a match.  Remove it from the list and
1623                          * respond.
1624                          */
1625                         list_del(pos);
1626                         spin_unlock(&Parahotplug_request_list_lock);
1627                         req->msg.cmd.device_change_state.state.active = active;
1628                         if (req->msg.hdr.flags.response_expected)
1629                                 controlvm_respond_physdev_changestate(
1630                                         &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1631                                         req->msg.cmd.device_change_state.state);
1632                         parahotplug_request_destroy(req);
1633                         return 0;
1634                 }
1635         }
1636
1637         spin_unlock(&Parahotplug_request_list_lock);
1638         return -1;
1639 }
1640
1641 /*
1642  * Enables or disables a PCI device by kicking off a udev script
1643  */
1644 static void
1645 parahotplug_process_message(struct controlvm_message *inmsg)
1646 {
1647         struct parahotplug_request *req;
1648
1649         req = parahotplug_request_create(inmsg);
1650
1651         if (req == NULL)
1652                 return;
1653
1654         if (inmsg->cmd.device_change_state.state.active) {
1655                 /* For enable messages, just respond with success
1656                 * right away.  This is a bit of a hack, but there are
1657                 * issues with the early enable messages we get (with
1658                 * either the udev script not detecting that the device
1659                 * is up, or not getting called at all).  Fortunately
1660                 * the messages that get lost don't matter anyway, as
1661                 * devices are automatically enabled at
1662                 * initialization.
1663                 */
1664                 parahotplug_request_kickoff(req);
1665                 controlvm_respond_physdev_changestate(&inmsg->hdr,
1666                                 CONTROLVM_RESP_SUCCESS,
1667                                 inmsg->cmd.device_change_state.state);
1668                 parahotplug_request_destroy(req);
1669         } else {
1670                 /* For disable messages, add the request to the
1671                 * request list before kicking off the udev script.  It
1672                 * won't get responded to until the script has
1673                 * indicated it's done.
1674                 */
1675                 spin_lock(&Parahotplug_request_list_lock);
1676                 list_add_tail(&(req->list), &Parahotplug_request_list);
1677                 spin_unlock(&Parahotplug_request_list_lock);
1678
1679                 parahotplug_request_kickoff(req);
1680         }
1681 }
1682
1683 /* Process a controlvm message.
1684  * Return result:
1685  *    FALSE - the controlvm message was NOT processed and must be
1686  *            retried before reading the next controlvm message; this
1687  *            can happen, for example, when we need to throttle the
1688  *            allocation of memory in which to copy out controlvm
1689  *            payload data
1691  *    TRUE  - processing of the controlvm message completed,
1692  *            either successfully or with an error.
1693  */
1694 static BOOL
1695 handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
1696 {
1697         struct controlvm_message_packet *cmd = &inmsg.cmd;
1698         u64 parametersAddr = 0;
1699         u32 parametersBytes = 0;
1700         struct parser_context *parser_ctx = NULL;
1701         BOOL isLocalAddr = FALSE;
1702         struct controlvm_message ackmsg;
1703
1704         /* create parsing context if necessary */
1705         isLocalAddr = (inmsg.hdr.flags.test_message == 1);
1706         if (channel_addr == 0)
1707                 return TRUE;
1708         parametersAddr = channel_addr + inmsg.hdr.payload_vm_offset;
1709         parametersBytes = inmsg.hdr.payload_bytes;
1710
1711         /* Parameter and channel addresses within test messages actually lie
1712          * within our OS-controlled memory.  We need to know that, because it
1713          * makes a difference in how we compute the virtual address.
1714          */
1715         if (parametersAddr != 0 && parametersBytes != 0) {
1716                 BOOL retry = FALSE;
1717
1718                 parser_ctx =
1719                     parser_init_byte_stream(parametersAddr, parametersBytes,
1720                                            isLocalAddr, &retry);
1721                 if (!parser_ctx && retry)
1722                         return FALSE;
1723         }
1724
1725         if (!isLocalAddr) {
1726                 controlvm_init_response(&ackmsg, &inmsg.hdr,
1727                                         CONTROLVM_RESP_SUCCESS);
1728                 if (controlvm_channel)
1729                         visorchannel_signalinsert(controlvm_channel,
1730                                                   CONTROLVM_QUEUE_ACK,
1731                                                   &ackmsg);
1732         }
1733         switch (inmsg.hdr.id) {
1734         case CONTROLVM_CHIPSET_INIT:
1735                 chipset_init(&inmsg);
1736                 break;
1737         case CONTROLVM_BUS_CREATE:
1738                 bus_create(&inmsg);
1739                 break;
1740         case CONTROLVM_BUS_DESTROY:
1741                 bus_destroy(&inmsg);
1742                 break;
1743         case CONTROLVM_BUS_CONFIGURE:
1744                 bus_configure(&inmsg, parser_ctx);
1745                 break;
1746         case CONTROLVM_DEVICE_CREATE:
1747                 my_device_create(&inmsg);
1748                 break;
1749         case CONTROLVM_DEVICE_CHANGESTATE:
1750                 if (cmd->device_change_state.flags.phys_device) {
1751                         parahotplug_process_message(&inmsg);
1752                 } else {
1753                         /* save the hdr and cmd structures for later use */
1754                         /* when sending back the response to Command */
1755                         my_device_changestate(&inmsg);
1756                         g_diag_msg_hdr = inmsg.hdr;
1757                         g_devicechangestate_packet = inmsg.cmd;
1758                         break;
1759                 }
1760                 break;
1761         case CONTROLVM_DEVICE_DESTROY:
1762                 my_device_destroy(&inmsg);
1763                 break;
1764         case CONTROLVM_DEVICE_CONFIGURE:
1765                 /* no op for now; just respond that we passed */
1766                 if (inmsg.hdr.flags.response_expected)
1767                         controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1768                 break;
1769         case CONTROLVM_CHIPSET_READY:
1770                 chipset_ready(&inmsg.hdr);
1771                 break;
1772         case CONTROLVM_CHIPSET_SELFTEST:
1773                 chipset_selftest(&inmsg.hdr);
1774                 break;
1775         case CONTROLVM_CHIPSET_STOP:
1776                 chipset_notready(&inmsg.hdr);
1777                 break;
1778         default:
1779                 if (inmsg.hdr.flags.response_expected)
1780                         controlvm_respond(&inmsg.hdr,
1781                                           -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1782                 break;
1783         }
1784
1785         if (parser_ctx != NULL) {
1786                 parser_done(parser_ctx);
1787                 parser_ctx = NULL;
1788         }
1789         return TRUE;
1790 }
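
/*
 * A minimal sketch of the FALSE/TRUE contract documented above: when
 * handle_command() returns FALSE, the real caller (controlvm_periodic_work()
 * below) stashes the message and reprocesses it on the next poll instead
 * of reading a new one.
 *
 *   if (!handle_command(inmsg,
 *                       visorchannel_get_physaddr(controlvm_channel))) {
 *           ControlVm_Pending_Msg = inmsg;
 *           ControlVm_Pending_Msg_Valid = TRUE;
 *   }
 */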
1791
1792 static HOSTADDRESS controlvm_get_channel_address(void)
1793 {
1794         u64 addr = 0;
1795         u32 size = 0;
1796
1797         if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
1798                 return 0;
1799
1800         return addr;
1801 }
1802
1803 static void
1804 controlvm_periodic_work(struct work_struct *work)
1805 {
1806         struct controlvm_message inmsg;
1807         BOOL gotACommand = FALSE;
1808         BOOL handle_command_failed = FALSE;
1809         static u64 Poll_Count;
1810
1811         /* make sure visorbus server is registered for controlvm callbacks */
1812         if (visorchipset_serverregwait && !serverregistered)
1813                 goto Away;
1814         /* make sure visorclientbus server is registered for controlvm
1815          * callbacks
1816          */
1817         if (visorchipset_clientregwait && !clientregistered)
1818                 goto Away;
1819
1820         Poll_Count++;
1821         /* wait until the 250th poll before doing any real work */
1822         if (Poll_Count < 250)
1823                 goto Away;
1825
1826         /* Check events to determine if response to CHIPSET_READY
1827          * should be sent
1828          */
1829         if (visorchipset_holdchipsetready &&
1830             (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
1831                 if (check_chipset_events() == 1) {
1832                         controlvm_respond(&g_chipset_msg_hdr, 0);
1833                         clear_chipset_events();
1834                         memset(&g_chipset_msg_hdr, 0,
1835                                sizeof(struct controlvm_message_header));
1836                 }
1837         }
1838
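        /* drain (and ignore) any messages sitting on the RESPONSE queue */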
1839         while (visorchannel_signalremove(controlvm_channel,
1840                                          CONTROLVM_QUEUE_RESPONSE,
1841                                          &inmsg))
1842                 ;
1843         if (!gotACommand) {
1844                 if (ControlVm_Pending_Msg_Valid) {
1845                         /* we throttled processing of a prior
1846                         * msg, so try to process it again
1847                         * rather than reading a new one
1848                         */
1849                         inmsg = ControlVm_Pending_Msg;
1850                         ControlVm_Pending_Msg_Valid = FALSE;
1851                         gotACommand = TRUE;
1852                 } else
1853                         gotACommand = read_controlvm_event(&inmsg);
1854         }
1855
1856         handle_command_failed = FALSE;
1857         while (gotACommand && (!handle_command_failed)) {
1858                 most_recent_message_jiffies = jiffies;
1859                 if (handle_command(inmsg,
1860                                    visorchannel_get_physaddr
1861                                    (controlvm_channel)))
1862                         gotACommand = read_controlvm_event(&inmsg);
1863                 else {
1864                         /* this is a scenario where throttling
1865                         * is required, but probably NOT an
1866                         * error...; we stash the current
1867                         * controlvm msg so we will attempt to
1868                         * reprocess it on our next loop
1869                         */
1870                         handle_command_failed = TRUE;
1871                         ControlVm_Pending_Msg = inmsg;
1872                         ControlVm_Pending_Msg_Valid = TRUE;
1873                 }
1874         }
1875
1876         /* parahotplug_worker */
1877         parahotplug_process_list();
1878
1879 Away:
1880
1881         if (time_after(jiffies,
1882                        most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
1883                 /* it's been longer than MIN_IDLE_SECONDS since we
1884                 * processed our last controlvm message; slow down the
1885                 * polling
1886                 */
1887                 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1888                         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1889         } else {
1890                 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1891                         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1892         }
1893
1894         queue_delayed_work(periodic_controlvm_workqueue,
1895                            &periodic_controlvm_work, poll_jiffies);
1896 }
1897
1898 static void
1899 setup_crash_devices_work_queue(struct work_struct *work)
1900 {
1902         struct controlvm_message localCrashCreateBusMsg;
1903         struct controlvm_message localCrashCreateDevMsg;
1904         struct controlvm_message msg;
1905         u32 localSavedCrashMsgOffset;
1906         u16 localSavedCrashMsgCount;
1907
1908         /* make sure visorbus server is registered for controlvm callbacks */
1909         if (visorchipset_serverregwait && !serverregistered)
1910                 goto Away;
1911
1912         /* make sure visorclientbus server is registered for controlvm
1913          * callbacks
1914          */
1915         if (visorchipset_clientregwait && !clientregistered)
1916                 goto Away;
1917
1918         POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1919
1920         /* send init chipset msg */
1921         msg.hdr.id = CONTROLVM_CHIPSET_INIT;
1922         msg.cmd.init_chipset.bus_count = 23;
1923         msg.cmd.init_chipset.switch_count = 0;
1924
1925         chipset_init(&msg);
1926
1927         /* get saved message count */
1928         if (visorchannel_read(controlvm_channel,
1929                               offsetof(struct spar_controlvm_channel_protocol,
1930                                        saved_crash_message_count),
1931                               &localSavedCrashMsgCount, sizeof(u16)) < 0) {
1932                 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1933                                  POSTCODE_SEVERITY_ERR);
1934                 return;
1935         }
1936
1937         if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
1938                 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
1939                                  localSavedCrashMsgCount,
1940                                  POSTCODE_SEVERITY_ERR);
1941                 return;
1942         }
1943
1944         /* get saved crash message offset */
1945         if (visorchannel_read(controlvm_channel,
1946                               offsetof(struct spar_controlvm_channel_protocol,
1947                                        saved_crash_message_offset),
1948                               &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
1949                 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1950                                  POSTCODE_SEVERITY_ERR);
1951                 return;
1952         }
1953
1954         /* read create device message for storage bus offset */
1955         if (visorchannel_read(controlvm_channel,
1956                               localSavedCrashMsgOffset,
1957                               &localCrashCreateBusMsg,
1958                               sizeof(struct controlvm_message)) < 0) {
1959                 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1960                                  POSTCODE_SEVERITY_ERR);
1961                 return;
1962         }
1963
1964         /* read create device message for storage device */
1965         if (visorchannel_read(controlvm_channel,
1966                               localSavedCrashMsgOffset +
1967                               sizeof(struct controlvm_message),
1968                               &localCrashCreateDevMsg,
1969                               sizeof(struct controlvm_message)) < 0) {
1970                 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1971                                  POSTCODE_SEVERITY_ERR);
1972                 return;
1973         }
1974
1975         /* reuse IOVM create bus message */
1976         if (localCrashCreateBusMsg.cmd.create_bus.channel_addr != 0)
1977                 bus_create(&localCrashCreateBusMsg);
1978         else {
1979                 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1980                                  POSTCODE_SEVERITY_ERR);
1981                 return;
1982         }
1983
1984         /* reuse create device message for storage device */
1985         if (localCrashCreateDevMsg.cmd.create_device.channel_addr != 0)
1986                 my_device_create(&localCrashCreateDevMsg);
1987         else {
1988                 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
1989                                  POSTCODE_SEVERITY_ERR);
1990                 return;
1991         }
1992         POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
1993         return;
1994
1995 Away:
1996
1997         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1998
1999         queue_delayed_work(periodic_controlvm_workqueue,
2000                            &periodic_controlvm_work, poll_jiffies);
2001 }
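
/*
 * Layout sketch of the saved-crash area consumed above.  The offset and
 * count are read from the controlvm channel header; CONTROLVM_CRASHMSG_MAX
 * is presumably 2, since exactly two saved messages are used:
 *
 *   saved_crash_message_offset ->
 *       [0] struct controlvm_message  (CONTROLVM_BUS_CREATE)
 *       [1] struct controlvm_message  (CONTROLVM_DEVICE_CREATE)
 */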
2002
2003 static void
2004 bus_create_response(ulong busNo, int response)
2005 {
2006         bus_responder(CONTROLVM_BUS_CREATE, busNo, response);
2007 }
2008
2009 static void
2010 bus_destroy_response(ulong busNo, int response)
2011 {
2012         bus_responder(CONTROLVM_BUS_DESTROY, busNo, response);
2013 }
2014
2015 static void
2016 device_create_response(ulong busNo, ulong devNo, int response)
2017 {
2018         device_responder(CONTROLVM_DEVICE_CREATE, busNo, devNo, response);
2019 }
2020
2021 static void
2022 device_destroy_response(ulong busNo, ulong devNo, int response)
2023 {
2024         device_responder(CONTROLVM_DEVICE_DESTROY, busNo, devNo, response);
2025 }
2026
2027 void
2028 visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
2029 {
2031         device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2032                                      bus_no, dev_no, response,
2033                                      segment_state_standby);
2034 }
2035 EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
2036
2037 static void
2038 device_resume_response(ulong busNo, ulong devNo, int response)
2039 {
2040         device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2041                                      busNo, devNo, response,
2042                                      segment_state_running);
2043 }
2044
2045 BOOL
2046 visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
2047 {
2048         void *p = findbus(&bus_info_list, bus_no);
2049
2050         if (!p)
2051                 return FALSE;
2052         memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2053         return TRUE;
2054 }
2055 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2056
2057 BOOL
2058 visorchipset_set_bus_context(ulong bus_no, void *context)
2059 {
2060         struct visorchipset_bus_info *p = findbus(&bus_info_list, bus_no);
2061
2062         if (!p)
2063                 return FALSE;
2064         p->bus_driver_context = context;
2065         return TRUE;
2066 }
2067 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2068
2069 BOOL
2070 visorchipset_get_device_info(ulong bus_no, ulong dev_no,
2071                              struct visorchipset_device_info *dev_info)
2072 {
2073         void *p = finddevice(&dev_info_list, bus_no, dev_no);
2074
2075         if (!p)
2076                 return FALSE;
2077         memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2078         return TRUE;
2079 }
2080 EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2081
2082 BOOL
2083 visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
2084 {
2085         struct visorchipset_device_info *p =
2086                         finddevice(&dev_info_list, bus_no, dev_no);
2087
2088         if (!p)
2089                 return FALSE;
2090         p->bus_driver_context = context;
2091         return TRUE;
2092 }
2093 EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
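
/*
 * Minimal (hypothetical) caller sketch for the accessors above; bus_no,
 * dev_no and my_ctx are illustrative names, not part of this driver:
 *
 *   struct visorchipset_device_info dev_info;
 *
 *   if (visorchipset_get_device_info(bus_no, dev_no, &dev_info)) {
 *           ... use the snapshot copy in dev_info ...
 *           visorchipset_set_device_context(bus_no, dev_no, my_ctx);
 *   }
 */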
2094
2095 /* Generic wrapper function for allocating memory from a kmem_cache pool.
2096  */
2097 void *
2098 visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
2099                          char *fn, int ln)
2100 {
2101         gfp_t gfp;
2102         void *p;
2103
2104         if (ok_to_block)
2105                 gfp = GFP_KERNEL;
2106         else
2107                 gfp = GFP_ATOMIC;
2108         /* __GFP_NORETRY means "ok to fail", meaning
2109          * kmem_cache_alloc() can return NULL, implying the caller CAN
2110          * cope with failure.  If you do NOT specify __GFP_NORETRY,
2111          * Linux will go to extreme measures to get memory for you
2112          * (e.g., invoke the OOM killer), which would likely cripple the
2113          * system.
2114          */
2115         gfp |= __GFP_NORETRY;
2116         p = kmem_cache_alloc(pool, gfp);
2117         if (!p)
2118                 return NULL;
2119
2120         atomic_inc(&Visorchipset_cache_buffers_in_use);
2121         return p;
2122 }
2123
2124 /* Generic wrapper function for freeing memory from a kmem_cache pool.
2125  */
2126 void
2127 visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2128 {
2129         if (!p)
2130                 return;
2131
2132         atomic_dec(&Visorchipset_cache_buffers_in_use);
2133         kmem_cache_free(pool, p);
2134 }
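
/*
 * Minimal (hypothetical) usage of the wrappers above; "my_pool" and the
 * entry type are illustrative:
 *
 *   struct putfile_buffer_entry *entry =
 *           visorchipset_cache_alloc(my_pool, TRUE, __FILE__, __LINE__);
 *   if (!entry)
 *           return -ENOMEM;  -- allocation may fail due to __GFP_NORETRY
 *   ...
 *   visorchipset_cache_free(my_pool, entry, __FILE__, __LINE__);
 */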
2135
2136 static ssize_t chipsetready_store(struct device *dev,
2137         struct device_attribute *attr, const char *buf, size_t count)
2138 {
2139         char msgtype[64];
2140
2141         if (sscanf(buf, "%63s", msgtype) != 1)
2142                 return -EINVAL;
2143
2144         if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
2145                 chipset_events[0] = 1;
2146                 return count;
2147         } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
2148                 chipset_events[1] = 1;
2149                 return count;
2150         }
2151         return -EINVAL;
2152 }
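
/*
 * For illustration, user space reports readiness with writes such as
 * (the exact sysfs path of the attribute is not shown in this file):
 *
 *   echo MODULES_LOADED       > .../visorchipset/.../chipsetready
 *   echo CALLHOMEDISK_MOUNTED > .../visorchipset/.../chipsetready
 *
 * Presumably once both events are recorded (see check_chipset_events()),
 * controlvm_periodic_work() above sends the held CHIPSET_READY response
 * when holdchipsetready is set.
 */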
2153
2154 /* The parahotplug/devicedisabled interface gets called by our support script
2155  * when an SR-IOV device has been shut down. The ID is passed to the script
2156  * and then passed back when the device has been removed.
2157  */
2158 static ssize_t devicedisabled_store(struct device *dev,
2159         struct device_attribute *attr, const char *buf, size_t count)
2160 {
2161         uint id;
2162
2163         if (kstrtouint(buf, 10, &id) != 0)
2164                 return -EINVAL;
2165
2166         parahotplug_request_complete(id, 0);
2167         return count;
2168 }
2169
2170 /* The parahotplug/deviceenabled interface gets called by our support script
2171  * when an SR-IOV device has been recovered. The ID is passed to the script
2172  * and then passed back when the device has been brought back up.
2173  */
2174 static ssize_t deviceenabled_store(struct device *dev,
2175         struct device_attribute *attr, const char *buf, size_t count)
2176 {
2177         uint id;
2178
2179         if (kstrtouint(buf, 10, &id) != 0)
2180                 return -EINVAL;
2181
2182         parahotplug_request_complete(id, 1);
2183         return count;
2184 }
2185
2186 static int __init
2187 visorchipset_init(void)
2188 {
2189         int rc = 0, x = 0;
2190         HOSTADDRESS addr;
2191
2192         if (!unisys_spar_platform)
2193                 return -ENODEV;
2194
2195         memset(&BusDev_Server_Notifiers, 0, sizeof(BusDev_Server_Notifiers));
2196         memset(&BusDev_Client_Notifiers, 0, sizeof(BusDev_Client_Notifiers));
2197         memset(&ControlVm_payload_info, 0, sizeof(ControlVm_payload_info));
2198         memset(&LiveDump_info, 0, sizeof(LiveDump_info));
2199         atomic_set(&LiveDump_info.buffers_in_use, 0);
2200
2201         if (visorchipset_testvnic) {
2202                 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
2203                 rc = x;
2204                 goto Away;
2205         }
2206
2207         addr = controlvm_get_channel_address();
2208         if (addr != 0) {
2209                 controlvm_channel =
2210                     visorchannel_create_with_lock
2211                     (addr,
2212                      sizeof(struct spar_controlvm_channel_protocol),
2213                      spar_controlvm_channel_protocol_uuid);
2214                 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2215                                 visorchannel_get_header(controlvm_channel))) {
2216                         initialize_controlvm_payload();
2217                 } else {
2218                         visorchannel_destroy(controlvm_channel);
2219                         controlvm_channel = NULL;
2220                         return -ENODEV;
2221                 }
2222         } else {
2223                 return -ENODEV;
2224         }
2225
2226         MajorDev = MKDEV(visorchipset_major, 0);
2227         rc = visorchipset_file_init(MajorDev, &controlvm_channel);
2228         if (rc < 0) {
2229                 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2230                 goto Away;
2231         }
2232
2233         memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
2234
2235         memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2236
2237         memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
2238
2239         Putfile_buffer_list_pool =
2240             kmem_cache_create(Putfile_buffer_list_pool_name,
2241                               sizeof(struct putfile_buffer_entry),
2242                               0, SLAB_HWCACHE_ALIGN, NULL);
2243         if (!Putfile_buffer_list_pool) {
2244                 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2245                 rc = -1;
2246                 goto Away;
2247         }
2248         if (!visorchipset_disable_controlvm) {
2249                 /* if booting in a crash kernel */
2250                 if (visorchipset_crash_kernel)
2251                         INIT_DELAYED_WORK(&periodic_controlvm_work,
2252                                           setup_crash_devices_work_queue);
2253                 else
2254                         INIT_DELAYED_WORK(&periodic_controlvm_work,
2255                                           controlvm_periodic_work);
2256                 periodic_controlvm_workqueue =
2257                     create_singlethread_workqueue("visorchipset_controlvm");
2258
2259                 if (periodic_controlvm_workqueue == NULL) {
2260                         POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2261                                          DIAG_SEVERITY_ERR);
2262                         rc = -ENOMEM;
2263                         goto Away;
2264                 }
2265                 most_recent_message_jiffies = jiffies;
2266                 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2267                 rc = queue_delayed_work(periodic_controlvm_workqueue,
2268                                         &periodic_controlvm_work, poll_jiffies);
2269                 if (rc < 0) {
2270                         POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2271                                          DIAG_SEVERITY_ERR);
2272                         goto Away;
2273                 }
2274
2275         }
2276
2277         Visorchipset_platform_device.dev.devt = MajorDev;
2278         if (platform_device_register(&Visorchipset_platform_device) < 0) {
2279                 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2280                 rc = -1;
2281                 goto Away;
2282         }
2283         POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2284         rc = 0;
2285 Away:
2286         if (rc) {
2287                 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2288                                  POSTCODE_SEVERITY_ERR);
2289         }
2290         return rc;
2291 }
2292
2293 static void
2294 visorchipset_exit(void)
2295 {
2296         POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2297
2298         if (!visorchipset_disable_controlvm) {
2299                 cancel_delayed_work(&periodic_controlvm_work);
2300                 flush_workqueue(periodic_controlvm_workqueue);
2301                 destroy_workqueue(periodic_controlvm_workqueue);
2302                 periodic_controlvm_workqueue = NULL;
2303                 destroy_controlvm_payload_info(&ControlVm_payload_info);
2304         }
2307         if (Putfile_buffer_list_pool) {
2308                 kmem_cache_destroy(Putfile_buffer_list_pool);
2309                 Putfile_buffer_list_pool = NULL;
2310         }
2311
2312         cleanup_controlvm_structures();
2313
2314         memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
2315
2316         memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2317
2318         memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
2319
2320         visorchannel_destroy(controlvm_channel);
2321
2322         visorchipset_file_cleanup();
2323         POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2324 }
2325
2326 module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2327 MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2328 int visorchipset_testvnic = 0;
2329
2330 module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2331 MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2332 int visorchipset_testvnicclient = 0;
2333
2334 module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2335 MODULE_PARM_DESC(visorchipset_testmsg,
2336                  "1 to manufacture the chipset, bus, and switch messages");
2337 int visorchipset_testmsg = 0;
2338
2339 module_param_named(major, visorchipset_major, int, S_IRUGO);
2340 MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
2341 int visorchipset_major = 0;
2342
2343 module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
2344 MODULE_PARM_DESC(visorchipset_serverregwait,
2345                  "1 to have the module wait for the visor bus to register");
2346 int visorchipset_serverregwait = 0;     /* default is off */
2347 module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2348 MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2349 int visorchipset_clientregwait = 1;     /* default is on */
2350 module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2351 MODULE_PARM_DESC(visorchipset_testteardown,
2352                  "1 to test teardown of the chipset, bus, and switch");
2353 int visorchipset_testteardown = 0;      /* default is off */
2354 module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2355                    S_IRUGO);
2356 MODULE_PARM_DESC(visorchipset_disable_controlvm,
2357                  "1 to disable polling of controlVm channel");
2358 int visorchipset_disable_controlvm = 0; /* default is off */
2359 module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
2360 MODULE_PARM_DESC(visorchipset_crash_kernel,
2361                  "1 means we are running in crash kernel");
2362 int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */
2363 module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2364                    int, S_IRUGO);
2365 MODULE_PARM_DESC(visorchipset_holdchipsetready,
2366                  "1 to hold response to CHIPSET_READY");
2367 int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
2368                                       * response immediately */
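
/*
 * Illustrative module load (parameter values are examples, not defaults,
 * and the module name "visorchipset" is assumed):
 *
 *   modprobe visorchipset crash_kernel=1 holdchipsetready=1 serverregwait=0
 *
 * module_param_named() maps each of these names onto the corresponding
 * visorchipset_* variable above.
 */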
2369 module_init(visorchipset_init);
2370 module_exit(visorchipset_exit);
2371
2372 MODULE_AUTHOR("Unisys");
2373 MODULE_LICENSE("GPL");
2374 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2375                    VERSION);
2376 MODULE_VERSION(VERSION);