staging: unisys: remove extra blank lines in visorchipset_main.c
[firefly-linux-kernel-4.4.55.git] / drivers / staging / unisys / visorchipset / visorchipset_main.c
1 /* visorchipset_main.c
2  *
3  * Copyright (C) 2010 - 2013 UNISYS CORPORATION
4  * All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or (at
9  * your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful, but
12  * WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14  * NON INFRINGEMENT.  See the GNU General Public License for more
15  * details.
16  */
17
18 #include "globals.h"
19 #include "visorchipset.h"
20 #include "procobjecttree.h"
21 #include "visorchannel.h"
22 #include "periodic_work.h"
23 #include "file.h"
24 #include "parser.h"
25 #include "uisutils.h"
26 #include "controlvmcompletionstatus.h"
27 #include "guestlinuxdebug.h"
28
29 #include <linux/nls.h>
30 #include <linux/netdevice.h>
31 #include <linux/platform_device.h>
32 #include <linux/uuid.h>
33
/* Tag reported by the POSTCODE macros originating from this file. */
#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
#define TEST_VNIC_PHYSITF "eth0"        /* physical network itf for
                                         * vnic loopback test */
#define TEST_VNIC_SWITCHNO 1
#define TEST_VNIC_BUSNO 9

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE   50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
/* Polling periods (in jiffies) for scanning the controlvm channel. */
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST   1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode.  As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
/* Current polling period; flips between the FAST and SLOW values above. */
static ulong poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
static ulong most_recent_message_jiffies;       /* when we got our last
                                                 * controlvm message */
/* Return the given string, or the empty string if the pointer is NULL,
 * so callers can print it without a NULL check.
 */
static inline char *
NONULLSTR(char *s)
{
	return s ? s : "";
}
61
/* Nonzero while a server/client bus driver has notifiers registered. */
static int serverregistered;
static int clientregistered;

#define MAX_CHIPSET_EVENTS 2
/* One flag per expected chipset event; all must be set before a chipset
 * response is sent (see check_chipset_events()).
 */
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
/* Serializes busdev server/client notifier (un)registration. */
static DEFINE_SEMAPHORE(notifier_lock);

/* Stashed controlvm message headers (consumed by code outside this chunk). */
static struct controlvm_message_header g_diag_msg_hdr;
static struct controlvm_message_header g_chipset_msg_hdr;
static struct controlvm_message_header g_del_dump_msg_hdr;
static const uuid_le spar_diag_pool_channel_protocol_uuid =
	SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
/* 0xffffff is an invalid Bus/Device number */
static ulong g_diagpool_bus_no = 0xffffff;
static ulong g_diagpool_dev_no = 0xffffff;
static struct controlvm_message_packet g_devicechangestate_packet;

/* Only VNIC and VHBA channels are sent to visorclientbus (aka
 * "visorhackbus")
 */
#define FOR_VISORHACKBUS(channel_type_guid) \
	(((uuid_le_cmp(channel_type_guid,\
		       spar_vnic_channel_protocol_uuid) == 0) ||\
	(uuid_le_cmp(channel_type_guid,\
			spar_vhba_channel_protocol_uuid) == 0)))
#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))

#define is_diagpool_channel(channel_type_guid) \
	(uuid_le_cmp(channel_type_guid,\
		     spar_diag_pool_channel_protocol_uuid) == 0)

/* All known buses/devices, chained via their 'entry' member. */
static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

/* The controlvm channel itself, through which Command talks to us. */
static struct visorchannel *controlvm_channel;
/* Manages the request payload in the controlvm channel */
static struct controlvm_payload_info {
	u8 __iomem *ptr;        /* pointer to base address of payload pool */
	u64 offset;             /* offset from beginning of controlvm
				 * channel to beginning of payload pool */
	u32 bytes;              /* number of bytes in payload pool */
} controlvm_payload_info;
108
/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
 */
static struct livedump_info {
	struct controlvm_message_header dumpcapture_header;
	struct controlvm_message_header gettextdump_header;
	struct controlvm_message_header dumpcomplete_header;
	BOOL gettextdump_outstanding;	/* TRUE while a GETTEXTDUMP is pending */
	u32 crc32;		/* presumably a checksum over the dump data -
				 * maintained outside this chunk; confirm */
	ulong length;
	atomic_t buffers_in_use;
	ulong destination;
} livedump_info;
122
/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements.  In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message ControlVm_Pending_Msg;
static BOOL ControlVm_Pending_Msg_Valid = FALSE;

/* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
 * TRANSMIT_FILE PutFile payloads.
 */
static struct kmem_cache *Putfile_buffer_list_pool;
static const char Putfile_buffer_list_pool_name[] =
	"controlvm_putfile_buffer_list_pool";

/* This identifies a data buffer that has been received via a controlvm message
 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
	struct list_head next;	/* putfile_buffer_entry list */
	struct parser_context *parser_ctx; /* points to input data buffer */
};
145
/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(Putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
	/* a payload from a controlvm message, containing a file data buffer */
	struct parser_context *parser_ctx;
	/* points within data area of parser_ctx to next byte of data */
	u8 *pnext;
	/* # bytes left from <pnext> to the end of this data buffer */
	size_t bytes_remaining;
};
164
/* Magic value stored in putfile_request.sig to validate entries. */
#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * <Putfile_request_list>.
 */
struct putfile_request {
	u64 sig;		/* PUTFILE_REQUEST_SIG */

	/* header from original TransmitFile request */
	struct controlvm_message_header controlvm_header;
	u64 file_request_number;	/* from original TransmitFile request */

	/* link to next struct putfile_request */
	struct list_head next_putfile_request;

	/* most-recent sequence number supplied via a controlvm message */
	u64 data_sequence_number;

	/* head of putfile_buffer_entry list, which describes the data to be
	 * supplied as putfile data;
	 * - this list is added to when controlvm messages come in that supply
	 * file data
	 * - this list is removed from via the hotplug program that is actually
	 * consuming these buffers to write as file data */
	struct list_head input_buffer_list;
	spinlock_t req_list_lock;	/* lock for input_buffer_list */

	/* waiters for input_buffer_list to go non-empty */
	wait_queue_head_t input_buffer_wq;

	/* data not yet read within current putfile_buffer_entry */
	struct putfile_active_buffer active_buf;

	/* <0 = failed, 0 = in-progress, >0 = successful; */
	/* note that this must be set with req_list_lock, and if you set <0, */
	/* it is your responsibility to also free up all of the other objects */
	/* in this struct (like input_buffer_list, active_buf.parser_ctx) */
	/* before releasing the lock */
	int completion_status;
};
205
/* Count of cache buffers currently handed out (managed outside this chunk). */
static atomic_t Visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);

/* One queued parahotplug device enable/disable request. */
struct parahotplug_request {
	struct list_head list;		/* link in Parahotplug_request_list */
	int id;				/* identifies this request */
	unsigned long expiration;	/* presumably a jiffies deadline -
					 * confirm in parahotplug code */
	struct controlvm_message msg;	/* the original controlvm message */
};

static LIST_HEAD(Parahotplug_request_list);
static DEFINE_SPINLOCK(Parahotplug_request_list_lock);	/* lock for above */
static void parahotplug_process_list(void);
218
/* Notifier callback sets registered by the visorbus server/client drivers.
 * NOTE(review): the original comment here ("Manages the info for a
 * CONTROLVM_DUMP_CAPTURESTATE / CONTROLVM_REPORTEVENT") appears to be a
 * copy-paste from livedump_info above and did not describe these fields.
 */
static struct visorchipset_busdev_notifiers BusDev_Server_Notifiers;
static struct visorchipset_busdev_notifiers BusDev_Client_Notifiers;

/* Local responder implementations handed back to bus drivers so they can
 * report completion of bus/device operations.
 */
static void bus_create_response(ulong busNo, int response);
static void bus_destroy_response(ulong busNo, int response);
static void device_create_response(ulong busNo, ulong devNo, int response);
static void device_destroy_response(ulong busNo, ulong devNo, int response);
static void device_resume_response(ulong busNo, ulong devNo, int response);

static struct visorchipset_busdev_responders BusDev_Responders = {
	.bus_create = bus_create_response,
	.bus_destroy = bus_destroy_response,
	.device_create = device_create_response,
	.device_destroy = device_destroy_response,
	.device_pause = visorchipset_device_pause_response,
	.device_resume = device_resume_response,
};
239
/* info for /dev/visorchipset */
static dev_t MajorDev = -1; /**< indicates major num for device */

/* prototypes for attributes */
/* "install" group: installation progress exposed through the channel. */
static ssize_t toolaction_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
	char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
	const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
	char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
	const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

/* "guest" group: guest-side chipset readiness notification. */
static ssize_t chipsetready_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

/* "parahotplug" group: user-space acks for device disable/enable. */
static ssize_t devicedisabled_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);
285
/* /sys/devices/platform/visorchipset/install */
static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

/* /sys/devices/platform/visorchipset/guest */
static struct attribute *visorchipset_guest_attrs[] = {
	&dev_attr_chipsetready.attr,
	NULL
};

static struct attribute_group visorchipset_guest_group = {
	.name = "guest",
	.attrs = visorchipset_guest_attrs
};

/* /sys/devices/platform/visorchipset/parahotplug */
static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_guest_group,
	&visorchipset_parahotplug_group,
	NULL
};

/* /sys/devices/platform/visorchipset */
static struct platform_device Visorchipset_platform_device = {
	.name = "visorchipset",
	.id = -1,
	.dev.groups = visorchipset_dev_groups,
};
334
/* Function prototypes (definitions below) */
static void controlvm_respond(struct controlvm_message_header *msgHdr,
			      int response);
static void controlvm_respond_chipset_init(
		struct controlvm_message_header *msgHdr, int response,
		enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msgHdr, int response,
		struct spar_segment_state state);
344
345 static ssize_t toolaction_show(struct device *dev,
346                                struct device_attribute *attr,
347                                char *buf)
348 {
349         u8 toolAction;
350
351         visorchannel_read(controlvm_channel,
352                 offsetof(struct spar_controlvm_channel_protocol,
353                            tool_action), &toolAction, sizeof(u8));
354         return scnprintf(buf, PAGE_SIZE, "%u\n", toolAction);
355 }
356
357 static ssize_t toolaction_store(struct device *dev,
358                                 struct device_attribute *attr,
359                                 const char *buf, size_t count)
360 {
361         u8 toolAction;
362         int ret;
363
364         if (kstrtou8(buf, 10, &toolAction) != 0)
365                 return -EINVAL;
366
367         ret = visorchannel_write(controlvm_channel,
368                 offsetof(struct spar_controlvm_channel_protocol, tool_action),
369                 &toolAction, sizeof(u8));
370
371         if (ret)
372                 return ret;
373         return count;
374 }
375
376 static ssize_t boottotool_show(struct device *dev,
377                                struct device_attribute *attr,
378                                char *buf)
379 {
380         struct efi_spar_indication efiSparIndication;
381
382         visorchannel_read(controlvm_channel,
383                 offsetof(struct spar_controlvm_channel_protocol,
384                         efi_spar_ind), &efiSparIndication,
385                 sizeof(struct efi_spar_indication));
386         return scnprintf(buf, PAGE_SIZE, "%u\n",
387                         efiSparIndication.boot_to_tool);
388 }
389
390 static ssize_t boottotool_store(struct device *dev,
391                                 struct device_attribute *attr,
392                                 const char *buf, size_t count)
393 {
394         int val, ret;
395         struct efi_spar_indication efiSparIndication;
396
397         if (kstrtoint(buf, 10, &val) != 0)
398                 return -EINVAL;
399
400         efiSparIndication.boot_to_tool = val;
401         ret = visorchannel_write(controlvm_channel,
402                         offsetof(struct spar_controlvm_channel_protocol,
403                                 efi_spar_ind),
404                         &(efiSparIndication),
405                 sizeof(struct efi_spar_indication));
406
407         if (ret)
408                 return ret;
409         return count;
410 }
411
412 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
413                 char *buf)
414 {
415         u32 error;
416
417         visorchannel_read(controlvm_channel, offsetof(
418                 struct spar_controlvm_channel_protocol, installation_error),
419                 &error, sizeof(u32));
420         return scnprintf(buf, PAGE_SIZE, "%i\n", error);
421 }
422
423 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
424                 const char *buf, size_t count)
425 {
426         u32 error;
427         int ret;
428
429         if (kstrtou32(buf, 10, &error) != 0)
430                 return -EINVAL;
431
432         ret = visorchannel_write(controlvm_channel,
433                         offsetof(struct spar_controlvm_channel_protocol,
434                                 installation_error),
435                         &error, sizeof(u32));
436         if (ret)
437                 return ret;
438         return count;
439 }
440
441 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
442                 char *buf)
443 {
444         u32 textId;
445
446         visorchannel_read(controlvm_channel, offsetof(
447                 struct spar_controlvm_channel_protocol, installation_text_id),
448                 &textId, sizeof(u32));
449         return scnprintf(buf, PAGE_SIZE, "%i\n", textId);
450 }
451
452 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
453                 const char *buf, size_t count)
454 {
455         u32 textId;
456         int ret;
457
458         if (kstrtou32(buf, 10, &textId) != 0)
459                 return -EINVAL;
460
461         ret = visorchannel_write(controlvm_channel,
462                         offsetof(struct spar_controlvm_channel_protocol,
463                                 installation_text_id),
464                         &textId, sizeof(u32));
465         if (ret)
466                 return ret;
467         return count;
468 }
469
470 static ssize_t remaining_steps_show(struct device *dev,
471         struct device_attribute *attr, char *buf)
472 {
473         u16 remainingSteps;
474
475         visorchannel_read(controlvm_channel,
476                 offsetof(struct spar_controlvm_channel_protocol,
477                         installation_remaining_steps),
478                 &remainingSteps,
479                 sizeof(u16));
480         return scnprintf(buf, PAGE_SIZE, "%hu\n", remainingSteps);
481 }
482
483 static ssize_t remaining_steps_store(struct device *dev,
484         struct device_attribute *attr, const char *buf, size_t count)
485 {
486         u16 remainingSteps;
487         int ret;
488
489         if (kstrtou16(buf, 10, &remainingSteps) != 0)
490                 return -EINVAL;
491
492         ret = visorchannel_write(controlvm_channel,
493                         offsetof(struct spar_controlvm_channel_protocol,
494                                 installation_remaining_steps),
495                         &remainingSteps, sizeof(u16));
496         if (ret)
497                 return ret;
498         return count;
499 }
500
#if 0
/* NOTE(review): dead (never-compiled) scratch code kept under "#if 0".
 * It would not even compile as-is: 'chrs' is used but never declared.
 * Candidate for outright deletion in a follow-up patch.
 */
static void
testUnicode(void)
{
	wchar_t unicodeString[] = { 'a', 'b', 'c', 0 };
	char s[sizeof(unicodeString) * NLS_MAX_CHARSET_SIZE];
	wchar_t unicode2[99];

	/* NOTE: Either due to a bug, or feature I don't understand, the
	 *       kernel utf8_mbstowcs() and utf_wcstombs() do NOT copy the
	 *       trailed NUL byte!!   REALLY!!!!!    Arrrrgggghhhhh
	 */

	LOGINF("sizeof(wchar_t) = %d", sizeof(wchar_t));
	LOGINF("utf8_wcstombs=%d",
	       chrs = utf8_wcstombs(s, unicodeString, sizeof(s)));
	if (chrs >= 0)
		s[chrs] = '\0';	/* GRRRRRRRR */
	LOGINF("s='%s'", s);
	LOGINF("utf8_mbstowcs=%d", chrs = utf8_mbstowcs(unicode2, s, 100));
	if (chrs >= 0)
		unicode2[chrs] = 0;	/* GRRRRRRRR */
	if (memcmp(unicodeString, unicode2, sizeof(unicodeString)) == 0)
		LOGINF("strings match... good");
	else
		LOGINF("strings did not match!!");
}
#endif
529
530 static void
531 busInfo_clear(void *v)
532 {
533         struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);
534
535         if (p->proc_object) {
536                 visor_proc_DestroyObject(p->proc_object);
537                 p->proc_object = NULL;
538         }
539         kfree(p->name);
540         p->name = NULL;
541
542         kfree(p->description);
543         p->description = NULL;
544
545         p->state.created = 0;
546         memset(p, 0, sizeof(struct visorchipset_bus_info));
547 }
548
549 static void
550 devInfo_clear(void *v)
551 {
552         struct visorchipset_device_info *p =
553                         (struct visorchipset_device_info *)(v);
554
555         p->state.created = 0;
556         memset(p, 0, sizeof(struct visorchipset_device_info));
557 }
558
559 static u8
560 check_chipset_events(void)
561 {
562         int i;
563         u8 send_msg = 1;
564         /* Check events to determine if response should be sent */
565         for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
566                 send_msg &= chipset_events[i];
567         return send_msg;
568 }
569
570 static void
571 clear_chipset_events(void)
572 {
573         int i;
574         /* Clear chipset_events */
575         for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
576                 chipset_events[i] = 0;
577 }
578
579 void
580 visorchipset_register_busdev_server(
581                         struct visorchipset_busdev_notifiers *notifiers,
582                         struct visorchipset_busdev_responders *responders,
583                         struct ultra_vbus_deviceinfo *driver_info)
584 {
585         down(&notifier_lock);
586         if (notifiers == NULL) {
587                 memset(&BusDev_Server_Notifiers, 0,
588                        sizeof(BusDev_Server_Notifiers));
589                 serverregistered = 0;   /* clear flag */
590         } else {
591                 BusDev_Server_Notifiers = *notifiers;
592                 serverregistered = 1;   /* set flag */
593         }
594         if (responders)
595                 *responders = BusDev_Responders;
596         if (driver_info)
597                 bus_device_info_init(driver_info, "chipset", "visorchipset",
598                                    VERSION, NULL);
599
600         up(&notifier_lock);
601 }
602 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
603
604 void
605 visorchipset_register_busdev_client(
606                         struct visorchipset_busdev_notifiers *notifiers,
607                         struct visorchipset_busdev_responders *responders,
608                         struct ultra_vbus_deviceinfo *driver_info)
609 {
610         down(&notifier_lock);
611         if (notifiers == NULL) {
612                 memset(&BusDev_Client_Notifiers, 0,
613                        sizeof(BusDev_Client_Notifiers));
614                 clientregistered = 0;   /* clear flag */
615         } else {
616                 BusDev_Client_Notifiers = *notifiers;
617                 clientregistered = 1;   /* set flag */
618         }
619         if (responders)
620                 *responders = BusDev_Responders;
621         if (driver_info)
622                 bus_device_info_init(driver_info, "chipset(bolts)",
623                                      "visorchipset", VERSION, NULL);
624         up(&notifier_lock);
625 }
626 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
627
628 static void
629 cleanup_controlvm_structures(void)
630 {
631         struct visorchipset_bus_info *bi, *tmp_bi;
632         struct visorchipset_device_info *di, *tmp_di;
633
634         list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
635                 busInfo_clear(bi);
636                 list_del(&bi->entry);
637                 kfree(bi);
638         }
639
640         list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
641                 devInfo_clear(di);
642                 list_del(&di->entry);
643                 kfree(di);
644         }
645 }
646
647 static void
648 chipset_init(struct controlvm_message *inmsg)
649 {
650         static int chipset_inited;
651         enum ultra_chipset_feature features = 0;
652         int rc = CONTROLVM_RESP_SUCCESS;
653
654         POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
655         if (chipset_inited) {
656                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
657                 goto Away;
658         }
659         chipset_inited = 1;
660         POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
661
662         /* Set features to indicate we support parahotplug (if Command
663          * also supports it). */
664         features =
665             inmsg->cmd.init_chipset.
666             features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
667
668         /* Set the "reply" bit so Command knows this is a
669          * features-aware driver. */
670         features |= ULTRA_CHIPSET_FEATURE_REPLY;
671
672 Away:
673         if (rc < 0)
674                 cleanup_controlvm_structures();
675         if (inmsg->hdr.flags.response_expected)
676                 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
677 }
678
679 static void
680 controlvm_init_response(struct controlvm_message *msg,
681                         struct controlvm_message_header *msgHdr, int response)
682 {
683         memset(msg, 0, sizeof(struct controlvm_message));
684         memcpy(&msg->hdr, msgHdr, sizeof(struct controlvm_message_header));
685         msg->hdr.payload_bytes = 0;
686         msg->hdr.payload_vm_offset = 0;
687         msg->hdr.payload_max_bytes = 0;
688         if (response < 0) {
689                 msg->hdr.flags.failed = 1;
690                 msg->hdr.completion_status = (u32) (-response);
691         }
692 }
693
694 static void
695 controlvm_respond(struct controlvm_message_header *msgHdr, int response)
696 {
697         struct controlvm_message outmsg;
698
699         controlvm_init_response(&outmsg, msgHdr, response);
700         /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
701         * back the deviceChangeState structure in the packet. */
702         if (msgHdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
703             g_devicechangestate_packet.device_change_state.bus_no ==
704             g_diagpool_bus_no &&
705             g_devicechangestate_packet.device_change_state.dev_no ==
706             g_diagpool_dev_no)
707                 outmsg.cmd = g_devicechangestate_packet;
708         if (outmsg.hdr.flags.test_message == 1)
709                 return;
710
711         if (!visorchannel_signalinsert(controlvm_channel,
712                                        CONTROLVM_QUEUE_REQUEST, &outmsg)) {
713                 return;
714         }
715 }
716
717 static void
718 controlvm_respond_chipset_init(struct controlvm_message_header *msgHdr,
719                                int response,
720                                enum ultra_chipset_feature features)
721 {
722         struct controlvm_message outmsg;
723
724         controlvm_init_response(&outmsg, msgHdr, response);
725         outmsg.cmd.init_chipset.features = features;
726         if (!visorchannel_signalinsert(controlvm_channel,
727                                        CONTROLVM_QUEUE_REQUEST, &outmsg)) {
728                 return;
729         }
730 }
731
732 static void controlvm_respond_physdev_changestate(
733                 struct controlvm_message_header *msgHdr, int response,
734                 struct spar_segment_state state)
735 {
736         struct controlvm_message outmsg;
737
738         controlvm_init_response(&outmsg, msgHdr, response);
739         outmsg.cmd.device_change_state.state = state;
740         outmsg.cmd.device_change_state.flags.phys_device = 1;
741         if (!visorchannel_signalinsert(controlvm_channel,
742                                        CONTROLVM_QUEUE_REQUEST, &outmsg)) {
743                 return;
744         }
745 }
746
/* Stash a controlvm message into the channel's saved-crash-message area
 * so it can be replayed after an OS crash.  CRASH_BUS messages go into
 * the first slot; anything else goes into the second slot (offset +
 * sizeof(struct controlvm_message)).  Any failure is reported via
 * POSTCODE and the message is silently dropped.
 */
void
visorchipset_save_message(struct controlvm_message *msg,
			  enum crash_obj_type type)
{
	u32 localSavedCrashMsgOffset;
	u16 localSavedCrashMsgCount;

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &localSavedCrashMsgCount, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* the channel must advertise exactly the expected slot count */
	if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 localSavedCrashMsgCount,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (type == CRASH_BUS) {
		/* bus message: first slot */
		if (visorchannel_write(controlvm_channel,
				       localSavedCrashMsgOffset,
				       msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	} else {
		/* device message: second slot */
		if (visorchannel_write(controlvm_channel,
				       localSavedCrashMsgOffset +
				       sizeof(struct controlvm_message), msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	}
}
EXPORT_SYMBOL_GPL(visorchipset_save_message);
802
/* Complete a controlvm bus request: update the tracked state of bus
 * <busNo> according to <cmdId>/<response>, then send the pending
 * controlvm response if one is expected and matches <cmdId>.  On a
 * successful BUS_DESTROY the bus bookkeeping (and all of its devices)
 * is dropped after the response goes out.
 */
static void
bus_responder(enum controlvm_id cmdId, ulong busNo, int response)
{
	struct visorchipset_bus_info *p = NULL;
	BOOL need_clear = FALSE;

	p = findbus(&bus_info_list, busNo);
	if (!p)
		return;		/* unknown bus - nothing to respond to */

	if (response < 0) {
		if ((cmdId == CONTROLVM_BUS_CREATE) &&
		    (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
			/* undo the row we just created... */
			delbusdevices(&dev_info_list, busNo);
	} else {
		if (cmdId == CONTROLVM_BUS_CREATE)
			p->state.created = 1;
		if (cmdId == CONTROLVM_BUS_DESTROY)
			need_clear = TRUE;	/* clear AFTER responding */
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != (u32) cmdId)
		return;		/* pending response is for another command */
	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear) {
		/* destroy succeeded: drop this bus and all its devices */
		busInfo_clear(p);
		delbusdevices(&dev_info_list, busNo);
	}
}
836
837 static void
838 device_changestate_responder(enum controlvm_id cmdId,
839                              ulong busNo, ulong devNo, int response,
840                              struct spar_segment_state responseState)
841 {
842         struct visorchipset_device_info *p = NULL;
843         struct controlvm_message outmsg;
844
845         p = finddevice(&dev_info_list, busNo, devNo);
846         if (!p)
847                 return;
848         if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
849                 return;         /* no controlvm response needed */
850         if (p->pending_msg_hdr.id != cmdId)
851                 return;
852
853         controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
854
855         outmsg.cmd.device_change_state.bus_no = busNo;
856         outmsg.cmd.device_change_state.dev_no = devNo;
857         outmsg.cmd.device_change_state.state = responseState;
858
859         if (!visorchannel_signalinsert(controlvm_channel,
860                                        CONTROLVM_QUEUE_REQUEST, &outmsg))
861                 return;
862
863         p->pending_msg_hdr.id = CONTROLVM_INVALID;
864 }
865
866 static void
867 device_responder(enum controlvm_id cmdId, ulong busNo, ulong devNo,
868                  int response)
869 {
870         struct visorchipset_device_info *p = NULL;
871         BOOL need_clear = FALSE;
872
873         p = finddevice(&dev_info_list, busNo, devNo);
874         if (!p)
875                 return;
876         if (response >= 0) {
877                 if (cmdId == CONTROLVM_DEVICE_CREATE)
878                         p->state.created = 1;
879                 if (cmdId == CONTROLVM_DEVICE_DESTROY)
880                         need_clear = TRUE;
881         }
882
883         if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
884                 return;         /* no controlvm response needed */
885
886         if (p->pending_msg_hdr.id != (u32) cmdId)
887                 return;
888
889         controlvm_respond(&p->pending_msg_hdr, response);
890         p->pending_msg_hdr.id = CONTROLVM_INVALID;
891         if (need_clear)
892                 devInfo_clear(p);
893 }
894
/* Finish processing a bus-related controlvm command.  Records the
 * request header when a response is owed (so bus_responder() can
 * answer it later), then, under notifier_lock, dispatches
 * create/destroy notifications to the registered server/client busdev
 * notifier sets.  If no notifier callback was invoked, responds
 * directly via bus_responder().
 */
static void
bus_epilog(u32 busNo,
           u32 cmd, struct controlvm_message_header *msgHdr,
           int response, BOOL needResponse)
{
        BOOL notified = FALSE;

        struct visorchipset_bus_info *pBusInfo = findbus(&bus_info_list, busNo);

        if (!pBusInfo)
                return;

        if (needResponse) {
                /* stash the header so the eventual responder can match
                 * and answer this request */
                memcpy(&pBusInfo->pending_msg_hdr, msgHdr,
                       sizeof(struct controlvm_message_header));
        } else
                pBusInfo->pending_msg_hdr.id = CONTROLVM_INVALID;

        down(&notifier_lock);
        if (response == CONTROLVM_RESP_SUCCESS) {
                switch (cmd) {
                case CONTROLVM_BUS_CREATE:
                        /* We can't tell from the bus_create
                        * information which of our 2 bus flavors the
                        * devices on this bus will ultimately end up.
                        * FORTUNATELY, it turns out it is harmless to
                        * send the bus_create to both of them.  We can
                        * narrow things down a little bit, though,
                        * because we know: - BusDev_Server can handle
                        * either server or client devices
                        * - BusDev_Client can handle ONLY client
                        * devices */
                        if (BusDev_Server_Notifiers.bus_create) {
                                (*BusDev_Server_Notifiers.bus_create) (busNo);
                                notified = TRUE;
                        }
                        if ((!pBusInfo->flags.server) /*client */ &&
                            BusDev_Client_Notifiers.bus_create) {
                                (*BusDev_Client_Notifiers.bus_create) (busNo);
                                notified = TRUE;
                        }
                        break;
                case CONTROLVM_BUS_DESTROY:
                        if (BusDev_Server_Notifiers.bus_destroy) {
                                (*BusDev_Server_Notifiers.bus_destroy) (busNo);
                                notified = TRUE;
                        }
                        if ((!pBusInfo->flags.server) /*client */ &&
                            BusDev_Client_Notifiers.bus_destroy) {
                                (*BusDev_Client_Notifiers.bus_destroy) (busNo);
                                notified = TRUE;
                        }
                        break;
                }
        }
        if (notified)
                /* The callback function just called above is responsible
                 * for calling the appropriate visorchipset_busdev_responders
                 * function, which will call bus_responder()
                 */
                ;
        else
                bus_responder(cmd, busNo, response);
        up(&notifier_lock);
}
960
/* Finish processing a device-related controlvm command.  Records the
 * request header when a response is owed, then, under notifier_lock,
 * dispatches create/changestate/destroy notifications to the chosen
 * busdev notifier set.  If no notifier callback was invoked, responds
 * directly via device_responder().
 */
static void
device_epilog(u32 busNo, u32 devNo, struct spar_segment_state state, u32 cmd,
              struct controlvm_message_header *msgHdr, int response,
              BOOL needResponse, BOOL for_visorbus)
{
        struct visorchipset_busdev_notifiers *notifiers = NULL;
        BOOL notified = FALSE;

        struct visorchipset_device_info *pDevInfo =
                finddevice(&dev_info_list, busNo, devNo);
        char *envp[] = {
                "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
                NULL
        };

        if (!pDevInfo)
                return;

        /* visorbus devices use the server notifier set; everything
         * else goes through the client set */
        if (for_visorbus)
                notifiers = &BusDev_Server_Notifiers;
        else
                notifiers = &BusDev_Client_Notifiers;
        if (needResponse) {
                /* stash the header so the eventual responder can match
                 * and answer this request */
                memcpy(&pDevInfo->pending_msg_hdr, msgHdr,
                       sizeof(struct controlvm_message_header));
        } else
                pDevInfo->pending_msg_hdr.id = CONTROLVM_INVALID;

        down(&notifier_lock);
        if (response >= 0) {
                switch (cmd) {
                case CONTROLVM_DEVICE_CREATE:
                        if (notifiers->device_create) {
                                (*notifiers->device_create) (busNo, devNo);
                                notified = TRUE;
                        }
                        break;
                case CONTROLVM_DEVICE_CHANGESTATE:
                        /* ServerReady / ServerRunning / SegmentStateRunning */
                        if (state.alive == segment_state_running.alive &&
                            state.operating ==
                                segment_state_running.operating) {
                                if (notifiers->device_resume) {
                                        (*notifiers->device_resume) (busNo,
                                                                     devNo);
                                        notified = TRUE;
                                }
                        }
                        /* ServerNotReady / ServerLost / SegmentStateStandby */
                        else if (state.alive == segment_state_standby.alive &&
                                 state.operating ==
                                 segment_state_standby.operating) {
                                /* technically this is standby case
                                 * where server is lost
                                 */
                                if (notifiers->device_pause) {
                                        (*notifiers->device_pause) (busNo,
                                                                    devNo);
                                        notified = TRUE;
                                }
                        } else if (state.alive == segment_state_paused.alive &&
                                   state.operating ==
                                   segment_state_paused.operating) {
                                /* this is lite pause where channel is
                                 * still valid just 'pause' of it
                                 */
                                if (busNo == g_diagpool_bus_no &&
                                    devNo == g_diagpool_dev_no) {
                                        /* this will trigger the
                                         * diag_shutdown.sh script in
                                         * the visorchipset hotplug */
                                        kobject_uevent_env
                                            (&Visorchipset_platform_device.dev.
                                             kobj, KOBJ_ONLINE, envp);
                                }
                        }
                        break;
                case CONTROLVM_DEVICE_DESTROY:
                        if (notifiers->device_destroy) {
                                (*notifiers->device_destroy) (busNo, devNo);
                                notified = TRUE;
                        }
                        break;
                }
        }
        if (notified)
                /* The callback function just called above is responsible
                 * for calling the appropriate visorchipset_busdev_responders
                 * function, which will call device_responder()
                 */
                ;
        else
                device_responder(cmd, busNo, devNo, response);
        up(&notifier_lock);
}
1056
1057 static void
1058 bus_create(struct controlvm_message *inmsg)
1059 {
1060         struct controlvm_message_packet *cmd = &inmsg->cmd;
1061         ulong busNo = cmd->create_bus.bus_no;
1062         int rc = CONTROLVM_RESP_SUCCESS;
1063         struct visorchipset_bus_info *pBusInfo = NULL;
1064
1065
1066         pBusInfo = findbus(&bus_info_list, busNo);
1067         if (pBusInfo && (pBusInfo->state.created == 1)) {
1068                 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1069                                  POSTCODE_SEVERITY_ERR);
1070                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1071                 goto Away;
1072         }
1073         pBusInfo = kzalloc(sizeof(struct visorchipset_bus_info), GFP_KERNEL);
1074         if (pBusInfo == NULL) {
1075                 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1076                                  POSTCODE_SEVERITY_ERR);
1077                 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1078                 goto Away;
1079         }
1080
1081         INIT_LIST_HEAD(&pBusInfo->entry);
1082         pBusInfo->bus_no = busNo;
1083         pBusInfo->dev_no = cmd->create_bus.dev_count;
1084
1085         POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1086
1087         if (inmsg->hdr.flags.test_message == 1)
1088                 pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1089         else
1090                 pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1091
1092         pBusInfo->flags.server = inmsg->hdr.flags.server;
1093         pBusInfo->chan_info.channel_addr = cmd->create_bus.channel_addr;
1094         pBusInfo->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1095         pBusInfo->chan_info.channel_type_uuid =
1096                         cmd->create_bus.bus_data_type_uuid;
1097         pBusInfo->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
1098
1099         list_add(&pBusInfo->entry, &bus_info_list);
1100
1101         POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1102
1103 Away:
1104         bus_epilog(busNo, CONTROLVM_BUS_CREATE, &inmsg->hdr,
1105                    rc, inmsg->hdr.flags.response_expected == 1);
1106 }
1107
1108 static void
1109 bus_destroy(struct controlvm_message *inmsg)
1110 {
1111         struct controlvm_message_packet *cmd = &inmsg->cmd;
1112         ulong busNo = cmd->destroy_bus.bus_no;
1113         struct visorchipset_bus_info *pBusInfo;
1114         int rc = CONTROLVM_RESP_SUCCESS;
1115
1116         pBusInfo = findbus(&bus_info_list, busNo);
1117         if (!pBusInfo) {
1118                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1119                 goto Away;
1120         }
1121         if (pBusInfo->state.created == 0) {
1122                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1123                 goto Away;
1124         }
1125
1126 Away:
1127         bus_epilog(busNo, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
1128                    rc, inmsg->hdr.flags.response_expected == 1);
1129 }
1130
1131 static void
1132 bus_configure(struct controlvm_message *inmsg,
1133               struct parser_context *parser_ctx)
1134 {
1135         struct controlvm_message_packet *cmd = &inmsg->cmd;
1136         ulong busNo = cmd->configure_bus.bus_no;
1137         struct visorchipset_bus_info *pBusInfo = NULL;
1138         int rc = CONTROLVM_RESP_SUCCESS;
1139         char s[99];
1140
1141         busNo = cmd->configure_bus.bus_no;
1142         POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1143
1144         pBusInfo = findbus(&bus_info_list, busNo);
1145         if (!pBusInfo) {
1146                 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1147                                  POSTCODE_SEVERITY_ERR);
1148                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1149                 goto Away;
1150         }
1151         if (pBusInfo->state.created == 0) {
1152                 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1153                                  POSTCODE_SEVERITY_ERR);
1154                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1155                 goto Away;
1156         }
1157         /* TBD - add this check to other commands also... */
1158         if (pBusInfo->pending_msg_hdr.id != CONTROLVM_INVALID) {
1159                 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1160                                  POSTCODE_SEVERITY_ERR);
1161                 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1162                 goto Away;
1163         }
1164
1165         pBusInfo->partition_handle = cmd->configure_bus.guest_handle;
1166         pBusInfo->partition_uuid = parser_id_get(parser_ctx);
1167         parser_param_start(parser_ctx, PARSERSTRING_NAME);
1168         pBusInfo->name = parser_string_get(parser_ctx);
1169
1170         visorchannel_uuid_id(&pBusInfo->partition_uuid, s);
1171         POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1172 Away:
1173         bus_epilog(busNo, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
1174                    rc, inmsg->hdr.flags.response_expected == 1);
1175 }
1176
1177 static void
1178 my_device_create(struct controlvm_message *inmsg)
1179 {
1180         struct controlvm_message_packet *cmd = &inmsg->cmd;
1181         ulong busNo = cmd->create_device.bus_no;
1182         ulong devNo = cmd->create_device.dev_no;
1183         struct visorchipset_device_info *pDevInfo = NULL;
1184         struct visorchipset_bus_info *pBusInfo = NULL;
1185         int rc = CONTROLVM_RESP_SUCCESS;
1186
1187         pDevInfo = finddevice(&dev_info_list, busNo, devNo);
1188         if (pDevInfo && (pDevInfo->state.created == 1)) {
1189                 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1190                                  POSTCODE_SEVERITY_ERR);
1191                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1192                 goto Away;
1193         }
1194         pBusInfo = findbus(&bus_info_list, busNo);
1195         if (!pBusInfo) {
1196                 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1197                                  POSTCODE_SEVERITY_ERR);
1198                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1199                 goto Away;
1200         }
1201         if (pBusInfo->state.created == 0) {
1202                 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1203                                  POSTCODE_SEVERITY_ERR);
1204                 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1205                 goto Away;
1206         }
1207         pDevInfo = kzalloc(sizeof(struct visorchipset_device_info), GFP_KERNEL);
1208         if (pDevInfo == NULL) {
1209                 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1210                                  POSTCODE_SEVERITY_ERR);
1211                 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1212                 goto Away;
1213         }
1214
1215         INIT_LIST_HEAD(&pDevInfo->entry);
1216         pDevInfo->bus_no = busNo;
1217         pDevInfo->dev_no = devNo;
1218         pDevInfo->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1219         POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
1220                          POSTCODE_SEVERITY_INFO);
1221
1222         if (inmsg->hdr.flags.test_message == 1)
1223                 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1224         else
1225                 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1226         pDevInfo->chan_info.channel_addr = cmd->create_device.channel_addr;
1227         pDevInfo->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1228         pDevInfo->chan_info.channel_type_uuid =
1229                         cmd->create_device.data_type_uuid;
1230         pDevInfo->chan_info.intr = cmd->create_device.intr;
1231         list_add(&pDevInfo->entry, &dev_info_list);
1232         POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, devNo, busNo,
1233                          POSTCODE_SEVERITY_INFO);
1234 Away:
1235         /* get the bus and devNo for DiagPool channel */
1236         if (pDevInfo &&
1237             is_diagpool_channel(pDevInfo->chan_info.channel_type_uuid)) {
1238                 g_diagpool_bus_no = busNo;
1239                 g_diagpool_dev_no = devNo;
1240         }
1241         device_epilog(busNo, devNo, segment_state_running,
1242                       CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
1243                       inmsg->hdr.flags.response_expected == 1,
1244                       FOR_VISORBUS(pDevInfo->chan_info.channel_type_uuid));
1245 }
1246
1247 static void
1248 my_device_changestate(struct controlvm_message *inmsg)
1249 {
1250         struct controlvm_message_packet *cmd = &inmsg->cmd;
1251         ulong busNo = cmd->device_change_state.bus_no;
1252         ulong devNo = cmd->device_change_state.dev_no;
1253         struct spar_segment_state state = cmd->device_change_state.state;
1254         struct visorchipset_device_info *pDevInfo = NULL;
1255         int rc = CONTROLVM_RESP_SUCCESS;
1256
1257         pDevInfo = finddevice(&dev_info_list, busNo, devNo);
1258         if (!pDevInfo) {
1259                 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1260                                  POSTCODE_SEVERITY_ERR);
1261                 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1262                 goto Away;
1263         }
1264         if (pDevInfo->state.created == 0) {
1265                 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1266                                  POSTCODE_SEVERITY_ERR);
1267                 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1268         }
1269 Away:
1270         if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1271                 device_epilog(busNo, devNo, state, CONTROLVM_DEVICE_CHANGESTATE,
1272                               &inmsg->hdr, rc,
1273                               inmsg->hdr.flags.response_expected == 1,
1274                               FOR_VISORBUS(
1275                                         pDevInfo->chan_info.channel_type_uuid));
1276 }
1277
1278 static void
1279 my_device_destroy(struct controlvm_message *inmsg)
1280 {
1281         struct controlvm_message_packet *cmd = &inmsg->cmd;
1282         ulong busNo = cmd->destroy_device.bus_no;
1283         ulong devNo = cmd->destroy_device.dev_no;
1284         struct visorchipset_device_info *pDevInfo = NULL;
1285         int rc = CONTROLVM_RESP_SUCCESS;
1286
1287         pDevInfo = finddevice(&dev_info_list, busNo, devNo);
1288         if (!pDevInfo) {
1289                 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1290                 goto Away;
1291         }
1292         if (pDevInfo->state.created == 0) {
1293                 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1294         }
1295
1296 Away:
1297         if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1298                 device_epilog(busNo, devNo, segment_state_running,
1299                               CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
1300                               inmsg->hdr.flags.response_expected == 1,
1301                               FOR_VISORBUS(
1302                                         pDevInfo->chan_info.channel_type_uuid));
1303 }
1304
/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS for
 * success or a negative CONTROLVM_RESP_ERROR_* value for failure.
 */
static int
initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
                                  struct controlvm_payload_info *info)
{
        u8 __iomem *payload = NULL;
        int rc = CONTROLVM_RESP_SUCCESS;

        if (info == NULL) {
                rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
                goto Away;
        }
        memset(info, 0, sizeof(struct controlvm_payload_info));
        /* an offset or size of zero means no payload area exists */
        if ((offset == 0) || (bytes == 0)) {
                rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
                goto Away;
        }
        payload = ioremap_cache(phys_addr + offset, bytes);
        if (payload == NULL) {
                rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
                goto Away;
        }

        info->offset = offset;
        info->bytes = bytes;
        info->ptr = payload;

Away:
        /* on failure, release any mapping we created above */
        if (rc < 0) {
                if (payload != NULL) {
                        iounmap(payload);
                        payload = NULL;
                }
        }
        return rc;
}
1346
1347 static void
1348 destroy_controlvm_payload_info(struct controlvm_payload_info *info)
1349 {
1350         if (info->ptr != NULL) {
1351                 iounmap(info->ptr);
1352                 info->ptr = NULL;
1353         }
1354         memset(info, 0, sizeof(struct controlvm_payload_info));
1355 }
1356
1357 static void
1358 initialize_controlvm_payload(void)
1359 {
1360         HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
1361         u64 payloadOffset = 0;
1362         u32 payloadBytes = 0;
1363
1364         if (visorchannel_read(controlvm_channel,
1365                               offsetof(struct spar_controlvm_channel_protocol,
1366                                        request_payload_offset),
1367                               &payloadOffset, sizeof(payloadOffset)) < 0) {
1368                 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1369                                  POSTCODE_SEVERITY_ERR);
1370                 return;
1371         }
1372         if (visorchannel_read(controlvm_channel,
1373                               offsetof(struct spar_controlvm_channel_protocol,
1374                                        request_payload_bytes),
1375                               &payloadBytes, sizeof(payloadBytes)) < 0) {
1376                 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1377                                  POSTCODE_SEVERITY_ERR);
1378                 return;
1379         }
1380         initialize_controlvm_payload_info(phys_addr,
1381                                           payloadOffset, payloadBytes,
1382                                           &controlvm_payload_info);
1383 }
1384
/*  Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 *  Returns CONTROLVM_RESP_xxx code.
 */
int
visorchipset_chipset_ready(void)
{
        /* notify user space (via udev) that the chipset is online */
        kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
        return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
1395
1396 int
1397 visorchipset_chipset_selftest(void)
1398 {
1399         char env_selftest[20];
1400         char *envp[] = { env_selftest, NULL };
1401
1402         sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1403         kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1404                            envp);
1405         return CONTROLVM_RESP_SUCCESS;
1406 }
1407 EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1408
/*  Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 *  Returns CONTROLVM_RESP_xxx code.
 */
int
visorchipset_chipset_notready(void)
{
        /* notify user space (via udev) that the chipset has gone offline */
        kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
        return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
1419
1420 static void
1421 chipset_ready(struct controlvm_message_header *msgHdr)
1422 {
1423         int rc = visorchipset_chipset_ready();
1424
1425         if (rc != CONTROLVM_RESP_SUCCESS)
1426                 rc = -rc;
1427         if (msgHdr->flags.response_expected && !visorchipset_holdchipsetready)
1428                 controlvm_respond(msgHdr, rc);
1429         if (msgHdr->flags.response_expected && visorchipset_holdchipsetready) {
1430                 /* Send CHIPSET_READY response when all modules have been loaded
1431                  * and disks mounted for the partition
1432                  */
1433                 g_chipset_msg_hdr = *msgHdr;
1434         }
1435 }
1436
1437 static void
1438 chipset_selftest(struct controlvm_message_header *msgHdr)
1439 {
1440         int rc = visorchipset_chipset_selftest();
1441
1442         if (rc != CONTROLVM_RESP_SUCCESS)
1443                 rc = -rc;
1444         if (msgHdr->flags.response_expected)
1445                 controlvm_respond(msgHdr, rc);
1446 }
1447
1448 static void
1449 chipset_notready(struct controlvm_message_header *msgHdr)
1450 {
1451         int rc = visorchipset_chipset_notready();
1452
1453         if (rc != CONTROLVM_RESP_SUCCESS)
1454                 rc = -rc;
1455         if (msgHdr->flags.response_expected)
1456                 controlvm_respond(msgHdr, rc);
1457 }
1458
1459 /* This is your "one-stop" shop for grabbing the next message from the
1460  * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1461  */
1462 static BOOL
1463 read_controlvm_event(struct controlvm_message *msg)
1464 {
1465         if (visorchannel_signalremove(controlvm_channel,
1466                                       CONTROLVM_QUEUE_EVENT, msg)) {
1467                 /* got a message */
1468                 if (msg->hdr.flags.test_message == 1)
1469                         return FALSE;
1470                 return TRUE;
1471         }
1472         return FALSE;
1473 }
1474
1475 /*
1476  * The general parahotplug flow works as follows.  The visorchipset
1477  * driver receives a DEVICE_CHANGESTATE message from Command
1478  * specifying a physical device to enable or disable.  The CONTROLVM
1479  * message handler calls parahotplug_process_message, which then adds
1480  * the message to a global list and kicks off a udev event which
1481  * causes a user level script to enable or disable the specified
1482  * device.  The udev script then writes to
1483  * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1484  * to get called, at which point the appropriate CONTROLVM message is
1485  * retrieved from the list and responded to.
1486  */
1487
1488 #define PARAHOTPLUG_TIMEOUT_MS 2000
1489
1490 /*
1491  * Generate unique int to match an outstanding CONTROLVM message with a
1492  * udev script /proc response
1493  */
1494 static int
1495 parahotplug_next_id(void)
1496 {
1497         static atomic_t id = ATOMIC_INIT(0);
1498
1499         return atomic_inc_return(&id);
1500 }
1501
1502 /*
1503  * Returns the time (in jiffies) when a CONTROLVM message on the list
1504  * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1505  */
1506 static unsigned long
1507 parahotplug_next_expiration(void)
1508 {
1509         return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1510 }
1511
1512 /*
1513  * Create a parahotplug_request, which is basically a wrapper for a
1514  * CONTROLVM_MESSAGE that we can stick on a list
1515  */
1516 static struct parahotplug_request *
1517 parahotplug_request_create(struct controlvm_message *msg)
1518 {
1519         struct parahotplug_request *req;
1520
1521         req = kmalloc(sizeof(*req), GFP_KERNEL|__GFP_NORETRY);
1522         if (req == NULL)
1523                 return NULL;
1524
1525         req->id = parahotplug_next_id();
1526         req->expiration = parahotplug_next_expiration();
1527         req->msg = *msg;
1528
1529         return req;
1530 }
1531
1532 /*
1533  * Free a parahotplug_request.
1534  */
1535 static void
1536 parahotplug_request_destroy(struct parahotplug_request *req)
1537 {
1538         kfree(req);
1539 }
1540
1541 /*
1542  * Cause uevent to run the user level script to do the disable/enable
1543  * specified in (the CONTROLVM message in) the specified
1544  * parahotplug_request
1545  */
1546 static void
1547 parahotplug_request_kickoff(struct parahotplug_request *req)
1548 {
1549         struct controlvm_message_packet *cmd = &req->msg.cmd;
1550         char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1551             env_func[40];
1552         char *envp[] = {
1553                 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1554         };
1555
1556         sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1557         sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1558         sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1559                 cmd->device_change_state.state.active);
1560         sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1561                 cmd->device_change_state.bus_no);
1562         sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1563                 cmd->device_change_state.dev_no >> 3);
1564         sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1565                 cmd->device_change_state.dev_no & 0x7);
1566
1567         kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1568                            envp);
1569 }
1570
1571 /*
1572  * Remove any request from the list that's been on there too long and
1573  * respond with an error.
1574  */
1575 static void
1576 parahotplug_process_list(void)
1577 {
1578         struct list_head *pos = NULL;
1579         struct list_head *tmp = NULL;
1580
1581         spin_lock(&Parahotplug_request_list_lock);
1582
1583         list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1584                 struct parahotplug_request *req =
1585                     list_entry(pos, struct parahotplug_request, list);
1586                 if (time_after_eq(jiffies, req->expiration)) {
1587                         list_del(pos);
1588                         if (req->msg.hdr.flags.response_expected)
1589                                 controlvm_respond_physdev_changestate(
1590                                         &req->msg.hdr,
1591                                         CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1592                                         req->msg.cmd.device_change_state.state);
1593                         parahotplug_request_destroy(req);
1594                 }
1595         }
1596
1597         spin_unlock(&Parahotplug_request_list_lock);
1598 }
1599
1600 /*
1601  * Called from the /proc handler, which means the user script has
1602  * finished the enable/disable.  Find the matching identifier, and
1603  * respond to the CONTROLVM message with success.
1604  */
1605 static int
1606 parahotplug_request_complete(int id, u16 active)
1607 {
1608         struct list_head *pos = NULL;
1609         struct list_head *tmp = NULL;
1610
1611         spin_lock(&Parahotplug_request_list_lock);
1612
1613         /* Look for a request matching "id". */
1614         list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1615                 struct parahotplug_request *req =
1616                     list_entry(pos, struct parahotplug_request, list);
1617                 if (req->id == id) {
1618                         /* Found a match.  Remove it from the list and
1619                          * respond.
1620                          */
1621                         list_del(pos);
1622                         spin_unlock(&Parahotplug_request_list_lock);
1623                         req->msg.cmd.device_change_state.state.active = active;
1624                         if (req->msg.hdr.flags.response_expected)
1625                                 controlvm_respond_physdev_changestate(
1626                                         &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1627                                         req->msg.cmd.device_change_state.state);
1628                         parahotplug_request_destroy(req);
1629                         return 0;
1630                 }
1631         }
1632
1633         spin_unlock(&Parahotplug_request_list_lock);
1634         return -1;
1635 }
1636
1637 /*
1638  * Enables or disables a PCI device by kicking off a udev script
1639  */
1640 static void
1641 parahotplug_process_message(struct controlvm_message *inmsg)
1642 {
1643         struct parahotplug_request *req;
1644
1645         req = parahotplug_request_create(inmsg);
1646
1647         if (req == NULL)
1648                 return;
1649
1650         if (inmsg->cmd.device_change_state.state.active) {
1651                 /* For enable messages, just respond with success
1652                 * right away.  This is a bit of a hack, but there are
1653                 * issues with the early enable messages we get (with
1654                 * either the udev script not detecting that the device
1655                 * is up, or not getting called at all).  Fortunately
1656                 * the messages that get lost don't matter anyway, as
1657                 * devices are automatically enabled at
1658                 * initialization.
1659                 */
1660                 parahotplug_request_kickoff(req);
1661                 controlvm_respond_physdev_changestate(&inmsg->hdr,
1662                                 CONTROLVM_RESP_SUCCESS, inmsg->cmd.
1663                                 device_change_state.state);
1664                 parahotplug_request_destroy(req);
1665         } else {
1666                 /* For disable messages, add the request to the
1667                 * request list before kicking off the udev script.  It
1668                 * won't get responded to until the script has
1669                 * indicated it's done.
1670                 */
1671                 spin_lock(&Parahotplug_request_list_lock);
1672                 list_add_tail(&(req->list), &Parahotplug_request_list);
1673                 spin_unlock(&Parahotplug_request_list_lock);
1674
1675                 parahotplug_request_kickoff(req);
1676         }
1677 }
1678
/* Process a controlvm message.
 * Return result:
 *    FALSE - this function will return FALSE only in the case where the
 *            controlvm message was NOT processed, but processing must be
 *            retried before reading the next controlvm message; a
 *            scenario where this can occur is when we need to throttle
 *            the allocation of memory in which to copy out controlvm
 *            payload data
 *    TRUE  - processing of the controlvm message completed,
 *            either successfully or with an error.
 */
static BOOL
handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
{
        struct controlvm_message_packet *cmd = &inmsg.cmd;
        u64 parametersAddr = 0;
        u32 parametersBytes = 0;
        struct parser_context *parser_ctx = NULL;
        BOOL isLocalAddr = FALSE;
        struct controlvm_message ackmsg;

        /* create parsing context if necessary */
        isLocalAddr = (inmsg.hdr.flags.test_message == 1);
        if (channel_addr == 0)
                return TRUE;
        /* payload location is an offset from the channel's base address */
        parametersAddr = channel_addr + inmsg.hdr.payload_vm_offset;
        parametersBytes = inmsg.hdr.payload_bytes;

        /* Parameter and channel addresses within test messages actually lie
         * within our OS-controlled memory.  We need to know that, because it
         * makes a difference in how we compute the virtual address.
         */
        if (parametersAddr != 0 && parametersBytes != 0) {
                BOOL retry = FALSE;

                parser_ctx =
                    parser_init_byte_stream(parametersAddr, parametersBytes,
                                           isLocalAddr, &retry);
                /* retry means memory pressure: tell the caller to hold this
                 * message and reprocess it later (the FALSE case above)
                 */
                if (!parser_ctx && retry)
                        return FALSE;
        }

        if (!isLocalAddr) {
                /* ack receipt of the message before dispatching it */
                controlvm_init_response(&ackmsg, &inmsg.hdr,
                                        CONTROLVM_RESP_SUCCESS);
                if (controlvm_channel)
                        visorchannel_signalinsert(controlvm_channel,
                                                  CONTROLVM_QUEUE_ACK,
                                                  &ackmsg);
        }
        /* dispatch on the controlvm message id */
        switch (inmsg.hdr.id) {
        case CONTROLVM_CHIPSET_INIT:
                chipset_init(&inmsg);
                break;
        case CONTROLVM_BUS_CREATE:
                bus_create(&inmsg);
                break;
        case CONTROLVM_BUS_DESTROY:
                bus_destroy(&inmsg);
                break;
        case CONTROLVM_BUS_CONFIGURE:
                bus_configure(&inmsg, parser_ctx);
                break;
        case CONTROLVM_DEVICE_CREATE:
                my_device_create(&inmsg);
                break;
        case CONTROLVM_DEVICE_CHANGESTATE:
                /* physical devices are handled by the parahotplug/udev
                 * machinery; virtual devices by the state responder
                 */
                if (cmd->device_change_state.flags.phys_device) {
                        parahotplug_process_message(&inmsg);
                } else {
                        /* save the hdr and cmd structures for later use */
                        /* when sending back the response to Command */
                        my_device_changestate(&inmsg);
                        g_diag_msg_hdr = inmsg.hdr;
                        g_devicechangestate_packet = inmsg.cmd;
                        break;
                }
                break;
        case CONTROLVM_DEVICE_DESTROY:
                my_device_destroy(&inmsg);
                break;
        case CONTROLVM_DEVICE_CONFIGURE:
                /* no op for now, just send a respond that we passed */
                if (inmsg.hdr.flags.response_expected)
                        controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
                break;
        case CONTROLVM_CHIPSET_READY:
                chipset_ready(&inmsg.hdr);
                break;
        case CONTROLVM_CHIPSET_SELFTEST:
                chipset_selftest(&inmsg.hdr);
                break;
        case CONTROLVM_CHIPSET_STOP:
                chipset_notready(&inmsg.hdr);
                break;
        default:
                if (inmsg.hdr.flags.response_expected)
                        controlvm_respond(&inmsg.hdr,
                                          -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
                break;
        }

        /* release the payload parsing context, if one was created */
        if (parser_ctx != NULL) {
                parser_done(parser_ctx);
                parser_ctx = NULL;
        }
        return TRUE;
}
1787
1788 static HOSTADDRESS controlvm_get_channel_address(void)
1789 {
1790         u64 addr = 0;
1791         u32 size = 0;
1792
1793         if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
1794                 return 0;
1795
1796         return addr;
1797 }
1798
/* Workqueue callback that polls the controlvm channel: drains the
 * response queue, reads and dispatches incoming controlvm messages
 * (retrying a previously throttled message first), expires stale
 * parahotplug requests, then requeues itself at a fast or slow
 * interval depending on recent message activity.
 */
static void
controlvm_periodic_work(struct work_struct *work)
{
        struct controlvm_message inmsg;
        BOOL gotACommand = FALSE;
        BOOL handle_command_failed = FALSE;
        static u64 Poll_Count;

        /* make sure visorbus server is registered for controlvm callbacks */
        if (visorchipset_serverregwait && !serverregistered)
                goto Away;
        /* make sure visorclientbus server is registered for controlvm
         * callbacks
         */
        if (visorchipset_clientregwait && !clientregistered)
                goto Away;

        /* NOTE(review): this skips all real work for the first 249
         * invocations -- presumably a startup grace period; confirm.
         */
        Poll_Count++;
        if (Poll_Count >= 250)
                ;       /* keep going */
        else
                goto Away;

        /* Check events to determine if response to CHIPSET_READY
         * should be sent
         */
        if (visorchipset_holdchipsetready &&
            (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
                if (check_chipset_events() == 1) {
                        controlvm_respond(&g_chipset_msg_hdr, 0);
                        clear_chipset_events();
                        memset(&g_chipset_msg_hdr, 0,
                               sizeof(struct controlvm_message_header));
                }
        }

        /* drain (and discard) everything on the response queue */
        while (visorchannel_signalremove(controlvm_channel,
                                         CONTROLVM_QUEUE_RESPONSE,
                                         &inmsg))
                ;
        if (!gotACommand) {
                if (ControlVm_Pending_Msg_Valid) {
                        /* we throttled processing of a prior
                        * msg, so try to process it again
                        * rather than reading a new one
                        */
                        inmsg = ControlVm_Pending_Msg;
                        ControlVm_Pending_Msg_Valid = FALSE;
                        gotACommand = TRUE;
                } else
                        gotACommand = read_controlvm_event(&inmsg);
        }

        /* process commands until the queue is empty or a command asks
         * to be throttled (handle_command returning FALSE)
         */
        handle_command_failed = FALSE;
        while (gotACommand && (!handle_command_failed)) {
                most_recent_message_jiffies = jiffies;
                if (handle_command(inmsg,
                                   visorchannel_get_physaddr
                                   (controlvm_channel)))
                        gotACommand = read_controlvm_event(&inmsg);
                else {
                        /* this is a scenario where throttling
                        * is required, but probably NOT an
                        * error...; we stash the current
                        * controlvm msg so we will attempt to
                        * reprocess it on our next loop
                        */
                        handle_command_failed = TRUE;
                        ControlVm_Pending_Msg = inmsg;
                        ControlVm_Pending_Msg_Valid = TRUE;
                }
        }

        /* parahotplug_worker */
        parahotplug_process_list();

Away:

        if (time_after(jiffies,
                       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
                /* it's been longer than MIN_IDLE_SECONDS since we
                * processed our last controlvm message; slow down the
                * polling
                */
                if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
                        poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
        } else {
                if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
                        poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
        }

        /* reschedule ourselves */
        queue_delayed_work(periodic_controlvm_workqueue,
                           &periodic_controlvm_work, poll_jiffies);
}
1893
/* Work function used instead of controlvm_periodic_work when booting a
 * crash (kdump) kernel: replays the bus-create and device-create
 * controlvm messages saved in the crash area of the controlvm channel,
 * so the storage device needed for the dump comes back up.
 */
static void
setup_crash_devices_work_queue(struct work_struct *work)
{
        struct controlvm_message localCrashCreateBusMsg;
        struct controlvm_message localCrashCreateDevMsg;
        struct controlvm_message msg;
        u32 localSavedCrashMsgOffset;
        u16 localSavedCrashMsgCount;

        /* make sure visorbus server is registered for controlvm callbacks */
        if (visorchipset_serverregwait && !serverregistered)
                goto Away;

        /* make sure visorclientbus server is registered for controlvm
         * callbacks
         */
        if (visorchipset_clientregwait && !clientregistered)
                goto Away;

        POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);

        /* send init chipset msg */
        msg.hdr.id = CONTROLVM_CHIPSET_INIT;
        msg.cmd.init_chipset.bus_count = 23;
        msg.cmd.init_chipset.switch_count = 0;

        chipset_init(&msg);

        /* get saved message count */
        if (visorchannel_read(controlvm_channel,
                              offsetof(struct spar_controlvm_channel_protocol,
                                       saved_crash_message_count),
                              &localSavedCrashMsgCount, sizeof(u16)) < 0) {
                POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }

        /* the crash area must hold exactly the expected message count */
        if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
                POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
                                 localSavedCrashMsgCount,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }

        /* get saved crash message offset */
        if (visorchannel_read(controlvm_channel,
                              offsetof(struct spar_controlvm_channel_protocol,
                                       saved_crash_message_offset),
                              &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
                POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }

        /* read create device message for storage bus offset */
        if (visorchannel_read(controlvm_channel,
                              localSavedCrashMsgOffset,
                              &localCrashCreateBusMsg,
                              sizeof(struct controlvm_message)) < 0) {
                POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }

        /* read create device message for storage device; it is stored
         * immediately after the bus-create message
         */
        if (visorchannel_read(controlvm_channel,
                              localSavedCrashMsgOffset +
                              sizeof(struct controlvm_message),
                              &localCrashCreateDevMsg,
                              sizeof(struct controlvm_message)) < 0) {
                POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }

        /* reuse IOVM create bus message */
        if (localCrashCreateBusMsg.cmd.create_bus.channel_addr != 0)
                bus_create(&localCrashCreateBusMsg);
        else {
                POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }

        /* reuse create device message for storage device */
        if (localCrashCreateDevMsg.cmd.create_device.channel_addr != 0)
                my_device_create(&localCrashCreateDevMsg);
        else {
                POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }
        POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
        return;

Away:
        /* a prerequisite bus isn't registered yet; retry later at the
         * slow polling interval
         */
        poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;

        queue_delayed_work(periodic_controlvm_workqueue,
                           &periodic_controlvm_work, poll_jiffies);
}
1997
/* Report the result of a CONTROLVM_BUS_CREATE for bus busNo. */
static void
bus_create_response(ulong busNo, int response)
{
        bus_responder(CONTROLVM_BUS_CREATE, busNo, response);
}
2003
/* Report the result of a CONTROLVM_BUS_DESTROY for bus busNo. */
static void
bus_destroy_response(ulong busNo, int response)
{
        bus_responder(CONTROLVM_BUS_DESTROY, busNo, response);
}
2009
/* Report the result of a CONTROLVM_DEVICE_CREATE for device busNo:devNo. */
static void
device_create_response(ulong busNo, ulong devNo, int response)
{
        device_responder(CONTROLVM_DEVICE_CREATE, busNo, devNo, response);
}
2015
/* Report the result of a CONTROLVM_DEVICE_DESTROY for device busNo:devNo. */
static void
device_destroy_response(ulong busNo, ulong devNo, int response)
{
        device_responder(CONTROLVM_DEVICE_DESTROY, busNo, devNo, response);
}
2021
/* Report the result of a device pause (CONTROLVM_DEVICE_CHANGESTATE to
 * segment_state_standby) back via the changestate responder.  Exported
 * so the bus/function drivers performing the actual pause can call it.
 */
void
visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
{
        device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
                                     bus_no, dev_no, response,
                                     segment_state_standby);
}
EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
2030
/* Report the result of a device resume (CONTROLVM_DEVICE_CHANGESTATE to
 * segment_state_running) back via the changestate responder.
 */
static void
device_resume_response(ulong busNo, ulong devNo, int response)
{
        device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
                                     busNo, devNo, response,
                                     segment_state_running);
}
2038
2039 BOOL
2040 visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
2041 {
2042         void *p = findbus(&bus_info_list, bus_no);
2043
2044         if (!p)
2045                 return FALSE;
2046         memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2047         return TRUE;
2048 }
2049 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2050
2051 BOOL
2052 visorchipset_set_bus_context(ulong bus_no, void *context)
2053 {
2054         struct visorchipset_bus_info *p = findbus(&bus_info_list, bus_no);
2055
2056         if (!p)
2057                 return FALSE;
2058         p->bus_driver_context = context;
2059         return TRUE;
2060 }
2061 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2062
2063 BOOL
2064 visorchipset_get_device_info(ulong bus_no, ulong dev_no,
2065                              struct visorchipset_device_info *dev_info)
2066 {
2067         void *p = finddevice(&dev_info_list, bus_no, dev_no);
2068
2069         if (!p)
2070                 return FALSE;
2071         memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2072         return TRUE;
2073 }
2074 EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2075
2076 BOOL
2077 visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
2078 {
2079         struct visorchipset_device_info *p =
2080                         finddevice(&dev_info_list, bus_no, dev_no);
2081
2082         if (!p)
2083                 return FALSE;
2084         p->bus_driver_context = context;
2085         return TRUE;
2086 }
2087 EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2088
2089 /* Generic wrapper function for allocating memory from a kmem_cache pool.
2090  */
2091 void *
2092 visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
2093                          char *fn, int ln)
2094 {
2095         gfp_t gfp;
2096         void *p;
2097
2098         if (ok_to_block)
2099                 gfp = GFP_KERNEL;
2100         else
2101                 gfp = GFP_ATOMIC;
2102         /* __GFP_NORETRY means "ok to fail", meaning
2103          * kmem_cache_alloc() can return NULL, implying the caller CAN
2104          * cope with failure.  If you do NOT specify __GFP_NORETRY,
2105          * Linux will go to extreme measures to get memory for you
2106          * (like, invoke oom killer), which will probably cripple the
2107          * system.
2108          */
2109         gfp |= __GFP_NORETRY;
2110         p = kmem_cache_alloc(pool, gfp);
2111         if (!p)
2112                 return NULL;
2113
2114         atomic_inc(&Visorchipset_cache_buffers_in_use);
2115         return p;
2116 }
2117
/* Generic wrapper function for freeing memory from a kmem_cache pool.
 * NULL is tolerated as a no-op; the in-use counter is decremented only
 * for real buffers, keeping it in step with visorchipset_cache_alloc().
 */
void
visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
{
        if (!p)
                return;

        atomic_dec(&Visorchipset_cache_buffers_in_use);
        kmem_cache_free(pool, p);
}
2129
2130 static ssize_t chipsetready_store(struct device *dev,
2131         struct device_attribute *attr, const char *buf, size_t count)
2132 {
2133         char msgtype[64];
2134
2135         if (sscanf(buf, "%63s", msgtype) != 1)
2136                 return -EINVAL;
2137
2138         if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
2139                 chipset_events[0] = 1;
2140                 return count;
2141         } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
2142                 chipset_events[1] = 1;
2143                 return count;
2144         }
2145         return -EINVAL;
2146 }
2147
2148 /* The parahotplug/devicedisabled interface gets called by our support script
2149  * when an SR-IOV device has been shut down. The ID is passed to the script
2150  * and then passed back when the device has been removed.
2151  */
2152 static ssize_t devicedisabled_store(struct device *dev,
2153         struct device_attribute *attr, const char *buf, size_t count)
2154 {
2155         uint id;
2156
2157         if (kstrtouint(buf, 10, &id) != 0)
2158                 return -EINVAL;
2159
2160         parahotplug_request_complete(id, 0);
2161         return count;
2162 }
2163
2164 /* The parahotplug/deviceenabled interface gets called by our support script
2165  * when an SR-IOV device has been recovered. The ID is passed to the script
2166  * and then passed back when the device has been brought back up.
2167  */
2168 static ssize_t deviceenabled_store(struct device *dev,
2169         struct device_attribute *attr, const char *buf, size_t count)
2170 {
2171         uint id;
2172
2173         if (kstrtouint(buf, 10, &id) != 0)
2174                 return -EINVAL;
2175
2176         parahotplug_request_complete(id, 1);
2177         return count;
2178 }
2179
/* Module entry point: locates the controlvm channel via vmcall, sets up
 * the character-device interface and the putfile buffer cache, starts
 * the periodic controlvm polling work (or, in a crash kernel, the
 * device-recovery work), and registers the platform device used for
 * sysfs/uevents.  Returns 0 on success, negative errno or -1 on failure.
 */
static int __init
visorchipset_init(void)
{
        int rc = 0, x = 0;
        HOSTADDRESS addr;

        if (!unisys_spar_platform)
                return -ENODEV;

        /* start from a clean slate of notifier/payload state */
        memset(&BusDev_Server_Notifiers, 0, sizeof(BusDev_Server_Notifiers));
        memset(&BusDev_Client_Notifiers, 0, sizeof(BusDev_Client_Notifiers));
        memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
        memset(&livedump_info, 0, sizeof(livedump_info));
        atomic_set(&livedump_info.buffers_in_use, 0);

        /* NOTE(review): x is always 0 here, so this "failure" path ends
         * up returning success (rc = 0) when testvnic is set -- confirm
         * whether that is the intended behavior.
         */
        if (visorchipset_testvnic) {
                POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
                rc = x;
                goto Away;
        }

        /* locate and validate the controlvm channel */
        addr = controlvm_get_channel_address();
        if (addr != 0) {
                controlvm_channel =
                    visorchannel_create_with_lock
                    (addr,
                     sizeof(struct spar_controlvm_channel_protocol),
                     spar_controlvm_channel_protocol_uuid);
                if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
                                visorchannel_get_header(controlvm_channel))) {
                        initialize_controlvm_payload();
                } else {
                        visorchannel_destroy(controlvm_channel);
                        controlvm_channel = NULL;
                        return -ENODEV;
                }
        } else {
                return -ENODEV;
        }

        /* create the character-device interface */
        MajorDev = MKDEV(visorchipset_major, 0);
        rc = visorchipset_file_init(MajorDev, &controlvm_channel);
        if (rc < 0) {
                POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
                goto Away;
        }

        /* clear the stashed controlvm message headers */
        memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));

        memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

        memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));

        Putfile_buffer_list_pool =
            kmem_cache_create(Putfile_buffer_list_pool_name,
                              sizeof(struct putfile_buffer_entry),
                              0, SLAB_HWCACHE_ALIGN, NULL);
        if (!Putfile_buffer_list_pool) {
                POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
                rc = -1;
                goto Away;
        }
        if (!visorchipset_disable_controlvm) {
                /* if booting in a crash kernel */
                if (visorchipset_crash_kernel)
                        INIT_DELAYED_WORK(&periodic_controlvm_work,
                                          setup_crash_devices_work_queue);
                else
                        INIT_DELAYED_WORK(&periodic_controlvm_work,
                                          controlvm_periodic_work);
                periodic_controlvm_workqueue =
                    create_singlethread_workqueue("visorchipset_controlvm");

                if (periodic_controlvm_workqueue == NULL) {
                        POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
                                         DIAG_SEVERITY_ERR);
                        rc = -ENOMEM;
                        goto Away;
                }
                most_recent_message_jiffies = jiffies;
                poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
                rc = queue_delayed_work(periodic_controlvm_workqueue,
                                        &periodic_controlvm_work, poll_jiffies);
                if (rc < 0) {
                        POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
                                         DIAG_SEVERITY_ERR);
                        goto Away;
                }
        }

        /* register the platform device used for sysfs files and uevents */
        Visorchipset_platform_device.dev.devt = MajorDev;
        if (platform_device_register(&Visorchipset_platform_device) < 0) {
                POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
                rc = -1;
                goto Away;
        }
        POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
        rc = 0;
Away:
        if (rc) {
                POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
                                 POSTCODE_SEVERITY_ERR);
        }
        return rc;
}
2285
2286 static void
2287 visorchipset_exit(void)
2288 {
2289         POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2290
2291         if (visorchipset_disable_controlvm) {
2292                 ;
2293         } else {
2294                 cancel_delayed_work(&periodic_controlvm_work);
2295                 flush_workqueue(periodic_controlvm_workqueue);
2296                 destroy_workqueue(periodic_controlvm_workqueue);
2297                 periodic_controlvm_workqueue = NULL;
2298                 destroy_controlvm_payload_info(&controlvm_payload_info);
2299         }
2300         if (Putfile_buffer_list_pool) {
2301                 kmem_cache_destroy(Putfile_buffer_list_pool);
2302                 Putfile_buffer_list_pool = NULL;
2303         }
2304
2305         cleanup_controlvm_structures();
2306
2307         memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
2308
2309         memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2310
2311         memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
2312
2313         visorchannel_destroy(controlvm_channel);
2314
2315         visorchipset_file_cleanup();
2316         POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2317 }
2318
2319 module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2320 MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2321 int visorchipset_testvnic = 0;
2322
2323 module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2324 MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2325 int visorchipset_testvnicclient = 0;
2326
2327 module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2328 MODULE_PARM_DESC(visorchipset_testmsg,
2329                  "1 to manufacture the chipset, bus, and switch messages");
2330 int visorchipset_testmsg = 0;
2331
2332 module_param_named(major, visorchipset_major, int, S_IRUGO);
2333 MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
2334 int visorchipset_major = 0;
2335
2336 module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
2337 MODULE_PARM_DESC(visorchipset_serverreqwait,
2338                  "1 to have the module wait for the visor bus to register");
2339 int visorchipset_serverregwait = 0;     /* default is off */
2340 module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2341 MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2342 int visorchipset_clientregwait = 1;     /* default is on */
2343 module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2344 MODULE_PARM_DESC(visorchipset_testteardown,
2345                  "1 to test teardown of the chipset, bus, and switch");
2346 int visorchipset_testteardown = 0;      /* default is off */
2347 module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2348                    S_IRUGO);
2349 MODULE_PARM_DESC(visorchipset_disable_controlvm,
2350                  "1 to disable polling of controlVm channel");
2351 int visorchipset_disable_controlvm = 0; /* default is off */
2352 module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
2353 MODULE_PARM_DESC(visorchipset_crash_kernel,
2354                  "1 means we are running in crash kernel");
2355 int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */
2356 module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2357                    int, S_IRUGO);
2358 MODULE_PARM_DESC(visorchipset_holdchipsetready,
2359                  "1 to hold response to CHIPSET_READY");
2360 int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
2361                                       * response immediately */
/* Register driver entry/exit points with the module loader. */
module_init(visorchipset_init);
module_exit(visorchipset_exit);

/* Standard module metadata; VERSION comes from an included header. */
MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
                   VERSION);
MODULE_VERSION(VERSION);