/* visorchipset.c
 *
 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#include "channel_guid.h"
#include "controlvmchannel.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"
#include "periodic_work.h"
#include "version.h"
#include "visorbus.h"
#include "visorbus_private.h"
#include "vmcallinterface.h"

#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE   50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST   1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)

#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET     0x00000000

#define UNISYS_SPAR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_SPAR_ID_EBX 0x73696e55
#define UNISYS_SPAR_ID_ECX 0x70537379
#define UNISYS_SPAR_ID_EDX 0x34367261

/*
 * Module parameters
 */
static int visorchipset_major;
static int visorchipset_visorbusregwait = 1;    /* default is on */
static int visorchipset_holdchipsetready;
static unsigned long controlvm_payload_bytes_buffered;

static int
visorchipset_open(struct inode *inode, struct file *file)
{
        unsigned minor_number = iminor(inode);

        if (minor_number)
                return -ENODEV;
        file->private_data = NULL;
        return 0;
}

static int
visorchipset_release(struct inode *inode, struct file *file)
{
        return 0;
}

/*
 * When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode.  As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
/* when we got our last controlvm message */
static unsigned long most_recent_message_jiffies;
static int visorbusregistered;

#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

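/*
 * Holds a buffered copy of one controlvm message payload while it is being
 * parsed: allocbytes is the total allocation size, param_bytes the payload
 * length, and curr/bytes_remaining form a cursor into data[] that
 * parser_param_start() positions and parser_string_get() consumes.
 */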
struct parser_context {
        unsigned long allocbytes;
        unsigned long param_bytes;
        u8 *curr;
        unsigned long bytes_remaining;
        bool byte_stream;
        char data[0];
};

static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);

static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
static struct controlvm_message_header g_chipset_msg_hdr;
static const uuid_le spar_diag_pool_channel_protocol_uuid =
        SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
/* 0xffffff is an invalid Bus/Device number */
static u32 g_diagpool_bus_no = 0xffffff;
static u32 g_diagpool_dev_no = 0xffffff;
static struct controlvm_message_packet g_devicechangestate_packet;

#define is_diagpool_channel(channel_type_guid) \
        (uuid_le_cmp(channel_type_guid, \
                     spar_diag_pool_channel_protocol_uuid) == 0)

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;

/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
        u8 __iomem *ptr;        /* pointer to base address of payload pool */
        u64 offset;             /* offset from beginning of controlvm
                                 * channel to beginning of payload pool */
        u32 bytes;              /* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;

/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements.  In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;

/* This identifies a data buffer that has been received via a controlvm
 * message in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
        struct list_head next;  /* putfile_buffer_entry list */
        struct parser_context *parser_ctx; /* points to input data buffer */
};

/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
        /* a payload from a controlvm message, containing a file data buffer */
        struct parser_context *parser_ctx;
        /* points within data area of parser_ctx to next byte of data */
        u8 *pnext;
        /* # bytes left from <pnext> to the end of this data buffer */
        size_t bytes_remaining;
};

#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * putfile_request_list.
 */
struct putfile_request {
        u64 sig;                /* PUTFILE_REQUEST_SIG */

        /* header from original TransmitFile request */
        struct controlvm_message_header controlvm_header;
        u64 file_request_number;        /* from original TransmitFile request */

        /* link to next struct putfile_request */
        struct list_head next_putfile_request;

        /* most-recent sequence number supplied via a controlvm message */
        u64 data_sequence_number;

        /* head of putfile_buffer_entry list, which describes the data to be
         * supplied as putfile data;
         * - this list is added to when controlvm messages come in that supply
         *   file data
         * - this list is removed from via the hotplug program that is actually
         *   consuming these buffers to write as file data
         */
        struct list_head input_buffer_list;
        spinlock_t req_list_lock;       /* lock for input_buffer_list */

        /* waiters for input_buffer_list to go non-empty */
        wait_queue_head_t input_buffer_wq;

        /* data not yet read within current putfile_buffer_entry */
        struct putfile_active_buffer active_buf;

        /* <0 = failed, 0 = in-progress, >0 = successful; note that this
         * must be set with req_list_lock held, and if you set <0, it is
         * your responsibility to also free up all of the other objects in
         * this struct (like input_buffer_list, active_buf.parser_ctx)
         * before releasing the lock
         */
        int completion_status;
};

struct parahotplug_request {
        struct list_head list;
        int id;
        unsigned long expiration;
        struct controlvm_message msg;
};

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);  /* lock for above */
static void parahotplug_process_list(void);

/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_REPORTEVENT.
 */
static struct visorchipset_busdev_notifiers busdev_notifiers;

static void bus_create_response(struct visorchipset_bus_info *p, int response);
static void bus_destroy_response(struct visorchipset_bus_info *p, int response);
static void device_create_response(u32 bus_no, u32 dev_no, int response);
static void device_destroy_response(u32 bus_no, u32 dev_no, int response);
static void device_resume_response(u32 bus_no, u32 dev_no, int response);

static void visorchipset_device_pause_response(u32 bus_no, u32 dev_no,
                                               int response);

static struct visorchipset_busdev_responders busdev_responders = {
        .bus_create = bus_create_response,
        .bus_destroy = bus_destroy_response,
        .device_create = device_create_response,
        .device_destroy = device_destroy_response,
        .device_pause = visorchipset_device_pause_response,
        .device_resume = device_resume_response,
};

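/*
 * busdev_responders above is handed back to the visorbus driver through
 * visorchipset_register_busdev(); visorbus invokes these callbacks to
 * acknowledge completion of the bus/device operations it was notified
 * about, which in turn sends the pending controlvm response.
 */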
/* info for /dev/visorchipset */
static dev_t major_dev = -1; /* indicates major num for device */

/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
                               struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
                               struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
                                struct device_attribute *attr, const char *buf,
                                size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
                          char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
                           char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
                                    struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
        &dev_attr_toolaction.attr,
        &dev_attr_boottotool.attr,
        &dev_attr_error.attr,
        &dev_attr_textid.attr,
        &dev_attr_remaining_steps.attr,
        NULL
};

static struct attribute_group visorchipset_install_group = {
        .name = "install",
        .attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
        &dev_attr_chipsetready.attr,
        NULL
};

static struct attribute_group visorchipset_guest_group = {
        .name = "guest",
        .attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
        &dev_attr_devicedisabled.attr,
        &dev_attr_deviceenabled.attr,
        NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
        .name = "parahotplug",
        .attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
        &visorchipset_install_group,
        &visorchipset_guest_group,
        &visorchipset_parahotplug_group,
        NULL
};

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
        .name = "visorchipset",
        .id = -1,
        .dev.groups = visorchipset_dev_groups,
};

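/*
 * The attribute groups above surface as sysfs files under the platform
 * device; the paths below are inferred from the group and attribute names:
 *   /sys/devices/platform/visorchipset/install/{toolaction,boottotool,
 *                                               error,textid,remaining_steps}
 *   /sys/devices/platform/visorchipset/guest/chipsetready
 *   /sys/devices/platform/visorchipset/parahotplug/{devicedisabled,
 *                                                   deviceenabled}
 * A write such as "echo 1 > .../install/textid" lands in the matching
 * *_store() handler defined later in this file.
 */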
/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msg_hdr,
                              int response);
static void controlvm_respond_chipset_init(
                struct controlvm_message_header *msg_hdr, int response,
                enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
                struct controlvm_message_header *msg_hdr, int response,
                struct spar_segment_state state);

static void parser_done(struct parser_context *ctx);

static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
        int allocbytes = sizeof(struct parser_context) + bytes;
        struct parser_context *rc = NULL;
        struct parser_context *ctx = NULL;

        if (retry)
                *retry = false;

        /*
         * allocate an extra byte to ensure the payload is
         * '\0'-terminated
         */
        allocbytes++;
        if ((controlvm_payload_bytes_buffered + bytes)
            > MAX_CONTROLVM_PAYLOAD_BYTES) {
                if (retry)
                        *retry = true;
                rc = NULL;
                goto cleanup;
        }
        ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
        if (!ctx) {
                if (retry)
                        *retry = true;
                rc = NULL;
                goto cleanup;
        }

        ctx->allocbytes = allocbytes;
        ctx->param_bytes = bytes;
        ctx->curr = NULL;
        ctx->bytes_remaining = 0;
        ctx->byte_stream = false;
        if (local) {
                void *p;

                if (addr > virt_to_phys(high_memory - 1)) {
                        rc = NULL;
                        goto cleanup;
                }
                p = __va((unsigned long)addr);
                memcpy(ctx->data, p, bytes);
        } else {
                void __iomem *mapping;

                if (!request_mem_region(addr, bytes, "visorchipset")) {
                        rc = NULL;
                        goto cleanup;
                }

                mapping = ioremap_cache(addr, bytes);
                if (!mapping) {
                        release_mem_region(addr, bytes);
                        rc = NULL;
                        goto cleanup;
                }
                memcpy_fromio(ctx->data, mapping, bytes);
                iounmap(mapping);
                release_mem_region(addr, bytes);
        }

        ctx->byte_stream = true;
        rc = ctx;
cleanup:
        if (rc) {
                controlvm_payload_bytes_buffered += ctx->param_bytes;
        } else if (ctx) {
                parser_done(ctx);
                ctx = NULL;
        }
        return rc;
}

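/*
 * Accounting note on parser_init_byte_stream() above:
 * controlvm_payload_bytes_buffered caps the total payload bytes held in
 * parser contexts at MAX_CONTROLVM_PAYLOAD_BYTES; when the cap (or the
 * allocation) fails, *retry is set so the caller can stash the message and
 * re-attempt it on a later controlvm_periodic_work() pass, as described
 * for controlvm_pending_msg above.
 */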
static uuid_le
parser_id_get(struct parser_context *ctx)
{
        struct spar_controlvm_parameters_header *phdr = NULL;

        if (!ctx)
                return NULL_UUID_LE;
        phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
        return phdr->id;
}

enum PARSER_WHICH_STRING {
        PARSERSTRING_INITIATOR,
        PARSERSTRING_TARGET,
        PARSERSTRING_CONNECTION,
        PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
};

static void
parser_param_start(struct parser_context *ctx,
                   enum PARSER_WHICH_STRING which_string)
{
        struct spar_controlvm_parameters_header *phdr = NULL;

        if (!ctx)
                return;
        phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
        switch (which_string) {
        case PARSERSTRING_INITIATOR:
                ctx->curr = ctx->data + phdr->initiator_offset;
                ctx->bytes_remaining = phdr->initiator_length;
                break;
        case PARSERSTRING_TARGET:
                ctx->curr = ctx->data + phdr->target_offset;
                ctx->bytes_remaining = phdr->target_length;
                break;
        case PARSERSTRING_CONNECTION:
                ctx->curr = ctx->data + phdr->connection_offset;
                ctx->bytes_remaining = phdr->connection_length;
                break;
        case PARSERSTRING_NAME:
                ctx->curr = ctx->data + phdr->name_offset;
                ctx->bytes_remaining = phdr->name_length;
                break;
        default:
                break;
        }
}

static void parser_done(struct parser_context *ctx)
{
        if (!ctx)
                return;
        controlvm_payload_bytes_buffered -= ctx->param_bytes;
        kfree(ctx);
}

static void *
parser_string_get(struct parser_context *ctx)
{
        u8 *pscan;
        unsigned long nscan;
        int value_length = -1;
        void *value = NULL;
        int i;

        if (!ctx)
                return NULL;
        pscan = ctx->curr;
        nscan = ctx->bytes_remaining;
        if (nscan == 0)
                return NULL;
        if (!pscan)
                return NULL;
        for (i = 0, value_length = -1; i < nscan; i++)
                if (pscan[i] == '\0') {
                        value_length = i;
                        break;
                }
        if (value_length < 0)   /* '\0' was not included in the length */
                value_length = nscan;
        value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
        if (!value)
                return NULL;
        if (value_length > 0)
                memcpy(value, pscan, value_length);
        ((u8 *)value)[value_length] = '\0';
        return value;
}

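/*
 * parser_string_get() returns a kmalloc()ed, NUL-terminated copy of the
 * current parameter string; the caller owns the buffer and must kfree() it
 * (e.g., bus_info->name below is released in bus_info_clear()).
 */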
static ssize_t toolaction_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        u8 tool_action;

        visorchannel_read(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         tool_action), &tool_action, sizeof(u8));
        return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        u8 tool_action;
        int ret;

        if (kstrtou8(buf, 10, &tool_action))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         tool_action),
                &tool_action, sizeof(u8));

        if (ret)
                return ret;
        return count;
}

static ssize_t boottotool_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct efi_spar_indication efi_spar_indication;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   efi_spar_ind), &efi_spar_indication,
                          sizeof(struct efi_spar_indication));
        return scnprintf(buf, PAGE_SIZE, "%u\n",
                         efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        int val, ret;
        struct efi_spar_indication efi_spar_indication;

        if (kstrtoint(buf, 10, &val))
                return -EINVAL;

        efi_spar_indication.boot_to_tool = val;
        ret = visorchannel_write(controlvm_channel,
                        offsetof(struct spar_controlvm_channel_protocol,
                                 efi_spar_ind), &efi_spar_indication,
                        sizeof(struct efi_spar_indication));

        if (ret)
                return ret;
        return count;
}

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        u32 error;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_error),
                          &error, sizeof(u32));
        return scnprintf(buf, PAGE_SIZE, "%u\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        u32 error;
        int ret;

        if (kstrtou32(buf, 10, &error))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         installation_error),
                &error, sizeof(u32));
        if (ret)
                return ret;
        return count;
}

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        u32 text_id;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_text_id),
                          &text_id, sizeof(u32));
        return scnprintf(buf, PAGE_SIZE, "%u\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
{
        u32 text_id;
        int ret;

        if (kstrtou32(buf, 10, &text_id))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         installation_text_id),
                &text_id, sizeof(u32));
        if (ret)
                return ret;
        return count;
}

static ssize_t remaining_steps_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        u16 remaining_steps;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_remaining_steps),
                          &remaining_steps, sizeof(u16));
        return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        u16 remaining_steps;
        int ret;

        if (kstrtou16(buf, 10, &remaining_steps))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         installation_remaining_steps),
                &remaining_steps, sizeof(u16));
        if (ret)
                return ret;
        return count;
}

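/*
 * All of the install/* attributes above follow the same pattern: the show
 * side reads the field at its offsetof() location in the controlvm channel
 * header, and the store side parses a base-10 value with kstrtoXX() and
 * writes it back through visorchannel_write(), returning count on success.
 */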
static void
bus_info_clear(void *v)
{
        struct visorchipset_bus_info *p = (struct visorchipset_bus_info *)v;

        kfree(p->name);
        kfree(p->description);
        memset(p, 0, sizeof(struct visorchipset_bus_info));
}

static void
dev_info_clear(void *v)
{
        struct visorchipset_device_info *p =
                (struct visorchipset_device_info *)v;

        memset(p, 0, sizeof(struct visorchipset_device_info));
}

struct visor_busdev {
        u32 bus_no;
        u32 dev_no;
};

static int match_visorbus_dev_by_id(struct device *dev, void *data)
{
        struct visor_device *vdev = to_visor_device(dev);
        struct visor_busdev *id = (struct visor_busdev *)data;
        u32 bus_no = id->bus_no;
        u32 dev_no = id->dev_no;

        if (((bus_no == -1) || (vdev->chipset_bus_no == bus_no)) &&
            ((dev_no == -1) || (vdev->chipset_dev_no == dev_no)))
                return 1;

        return 0;
}

struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
                                               struct visor_device *from)
{
        struct device *dev;
        struct device *dev_start = NULL;
        struct visor_device *vdev = NULL;
        struct visor_busdev id = {
                        .bus_no = bus_no,
                        .dev_no = dev_no
                };

        if (from)
                dev_start = &from->device;
        dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
                              match_visorbus_dev_by_id);
        if (dev)
                vdev = to_visor_device(dev);
        return vdev;
}
EXPORT_SYMBOL(visorbus_get_device_by_id);

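/*
 * In visorbus_get_device_by_id() above, a bus_no or dev_no of -1 acts as a
 * wildcard in the match function, and the "from" argument resumes the
 * search after a previous hit, so a caller could (sketch, not taken from
 * this file) walk every device on one bus with:
 *
 *   struct visor_device *vdev = NULL;
 *
 *   while ((vdev = visorbus_get_device_by_id(bus_no, -1, vdev)))
 *           ;  // visit each matching device in turn
 */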
static struct visorchipset_bus_info *
bus_find(struct list_head *list, u32 bus_no)
{
        struct visorchipset_bus_info *p;

        list_for_each_entry(p, list, entry) {
                if (p->bus_no == bus_no)
                        return p;
        }

        return NULL;
}

static struct visorchipset_device_info *
device_find(struct list_head *list, u32 bus_no, u32 dev_no)
{
        struct visorchipset_device_info *p;

        list_for_each_entry(p, list, entry) {
                if (p->bus_no == bus_no && p->dev_no == dev_no)
                        return p;
        }

        return NULL;
}

static void busdevices_del(struct list_head *list, u32 bus_no)
{
        struct visorchipset_device_info *p, *tmp;

        list_for_each_entry_safe(p, tmp, list, entry) {
                if (p->bus_no == bus_no) {
                        list_del(&p->entry);
                        kfree(p);
                }
        }
}

static u8
check_chipset_events(void)
{
        int i;
        u8 send_msg = 1;

        /* Check events to determine if response should be sent */
        for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
                send_msg &= chipset_events[i];
        return send_msg;
}

static void
clear_chipset_events(void)
{
        int i;

        /* Clear chipset_events */
        for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
                chipset_events[i] = 0;
}

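/*
 * check_chipset_events() above ANDs all MAX_CHIPSET_EVENTS flags together,
 * so it returns 1 only once every tracked chipset event has been marked;
 * until then the aggregate response is held back.
 */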
void
visorchipset_register_busdev(
                        struct visorchipset_busdev_notifiers *notifiers,
                        struct visorchipset_busdev_responders *responders,
                        struct ultra_vbus_deviceinfo *driver_info)
{
        down(&notifier_lock);
        if (!notifiers) {
                memset(&busdev_notifiers, 0,
                       sizeof(busdev_notifiers));
                visorbusregistered = 0; /* clear flag */
        } else {
                busdev_notifiers = *notifiers;
                visorbusregistered = 1; /* set flag */
        }
        if (responders)
                *responders = busdev_responders;
        if (driver_info)
                bus_device_info_init(driver_info, "chipset", "visorchipset",
                                     VERSION, NULL);

        up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev);

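/*
 * visorchipset_register_busdev() above is expected to be called by the
 * visorbus driver at load time to install its notifier callbacks and to
 * receive the responder table; calling it again with a NULL notifiers
 * pointer deregisters visorbus (the visorbusregistered flag tracks this).
 */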
static void
cleanup_controlvm_structures(void)
{
        struct visorchipset_bus_info *bi, *tmp_bi;
        struct visorchipset_device_info *di, *tmp_di;

        list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
                bus_info_clear(bi);
                list_del(&bi->entry);
                kfree(bi);
        }

        list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
                dev_info_clear(di);
                list_del(&di->entry);
                kfree(di);
        }
}

static void
chipset_init(struct controlvm_message *inmsg)
{
        static int chipset_inited;
        enum ultra_chipset_feature features = 0;
        int rc = CONTROLVM_RESP_SUCCESS;

        POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
        if (chipset_inited) {
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto cleanup;
        }
        chipset_inited = 1;
        POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

        /* Set features to indicate we support parahotplug (if Command
         * also supports it).
         */
        features = inmsg->cmd.init_chipset.features &
                   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

        /* Set the "reply" bit so Command knows this is a
         * features-aware driver.
         */
        features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
        if (rc < 0)
                cleanup_controlvm_structures();
        if (inmsg->hdr.flags.response_expected)
                controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}

static void
controlvm_init_response(struct controlvm_message *msg,
                        struct controlvm_message_header *msg_hdr, int response)
{
        memset(msg, 0, sizeof(struct controlvm_message));
        memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
        msg->hdr.payload_bytes = 0;
        msg->hdr.payload_vm_offset = 0;
        msg->hdr.payload_max_bytes = 0;
        if (response < 0) {
                msg->hdr.flags.failed = 1;
                msg->hdr.completion_status = (u32)(-response);
        }
}

static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
         * back the deviceChangeState structure in the packet.
         */
        if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
            g_devicechangestate_packet.device_change_state.bus_no ==
            g_diagpool_bus_no &&
            g_devicechangestate_packet.device_change_state.dev_no ==
            g_diagpool_dev_no)
                outmsg.cmd = g_devicechangestate_packet;
        if (outmsg.hdr.flags.test_message == 1)
                return;

        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg))
                return;
}

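/*
 * controlvm_respond() above and the two variants below all queue the reply
 * back to Command by inserting the message on the controlvm channel's
 * CONTROLVM_QUEUE_REQUEST signal queue; replies to test messages are
 * intentionally dropped.
 */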
static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
                               int response,
                               enum ultra_chipset_feature features)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        outmsg.cmd.init_chipset.features = features;
        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg))
                return;
}

static void controlvm_respond_physdev_changestate(
                struct controlvm_message_header *msg_hdr, int response,
                struct spar_segment_state state)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        outmsg.cmd.device_change_state.state = state;
        outmsg.cmd.device_change_state.flags.phys_device = 1;
        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg))
                return;
}

enum crash_obj_type {
        CRASH_DEV,
        CRASH_BUS,
};

static void
bus_responder(enum controlvm_id cmd_id, struct visorchipset_bus_info *p,
              int response)
{
        bool need_clear = false;
        u32 bus_no;

        if (!p)
                return;
        bus_no = p->bus_no;

        if (response < 0) {
                if ((cmd_id == CONTROLVM_BUS_CREATE) &&
                    (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
                        /* undo the row we just created... */
                        busdevices_del(&dev_info_list, bus_no);
        } else {
                if (cmd_id == CONTROLVM_BUS_CREATE)
                        p->state.created = 1;
                if (cmd_id == CONTROLVM_BUS_DESTROY)
                        need_clear = true;
        }

        if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
                return;         /* no controlvm response needed */
        if (p->pending_msg_hdr.id != (u32)cmd_id)
                return;
        controlvm_respond(&p->pending_msg_hdr, response);
        p->pending_msg_hdr.id = CONTROLVM_INVALID;
        if (need_clear) {
                bus_info_clear(p);
                busdevices_del(&dev_info_list, bus_no);
        }
}

static void
device_changestate_responder(enum controlvm_id cmd_id,
                             u32 bus_no, u32 dev_no, int response,
                             struct spar_segment_state response_state)
{
        struct visorchipset_device_info *p;
        struct controlvm_message outmsg;

        p = device_find(&dev_info_list, bus_no, dev_no);
        if (!p)
                return;
        if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
                return;         /* no controlvm response needed */
        if (p->pending_msg_hdr.id != (u32)cmd_id)
                return;

        controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);

        outmsg.cmd.device_change_state.bus_no = bus_no;
        outmsg.cmd.device_change_state.dev_no = dev_no;
        outmsg.cmd.device_change_state.state = response_state;

        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg))
                return;

        p->pending_msg_hdr.id = CONTROLVM_INVALID;
}

static void
device_responder(enum controlvm_id cmd_id, u32 bus_no, u32 dev_no, int response)
{
        struct visorchipset_device_info *p;
        bool need_clear = false;

        p = device_find(&dev_info_list, bus_no, dev_no);
        if (!p)
                return;
        if (response >= 0) {
                if (cmd_id == CONTROLVM_DEVICE_CREATE)
                        p->state.created = 1;
                if (cmd_id == CONTROLVM_DEVICE_DESTROY)
                        need_clear = true;
        }

        if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
                return;         /* no controlvm response needed */

        if (p->pending_msg_hdr.id != (u32)cmd_id)
                return;

        controlvm_respond(&p->pending_msg_hdr, response);
        p->pending_msg_hdr.id = CONTROLVM_INVALID;
        if (need_clear)
                dev_info_clear(p);
}

static void
bus_epilog(struct visorchipset_bus_info *bus_info,
           u32 cmd, struct controlvm_message_header *msg_hdr,
           int response, bool need_response)
{
        bool notified = false;

        if (!bus_info)
                return;

        if (need_response) {
                memcpy(&bus_info->pending_msg_hdr, msg_hdr,
                       sizeof(struct controlvm_message_header));
        } else {
                bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
        }

        down(&notifier_lock);
        if (response == CONTROLVM_RESP_SUCCESS) {
                switch (cmd) {
                case CONTROLVM_BUS_CREATE:
                        if (busdev_notifiers.bus_create) {
                                (*busdev_notifiers.bus_create) (bus_info);
                                notified = true;
                        }
                        break;
                case CONTROLVM_BUS_DESTROY:
                        if (busdev_notifiers.bus_destroy) {
                                (*busdev_notifiers.bus_destroy) (bus_info);
                                notified = true;
                        }
                        break;
                }
        }
        /* If a notifier was called above, it is responsible for calling
         * the appropriate visorchipset_busdev_responders function, which
         * will call bus_responder(); otherwise respond right here.
         */
        if (!notified)
                bus_responder(cmd, bus_info, response);
        up(&notifier_lock);
}

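/*
 * bus_epilog() above and device_epilog() below implement the common tail
 * of every bus and device command: stash the request header if a response
 * is expected, hand the event to the registered visorbus notifier when
 * there is one, and fall back to responding directly when there is not.
 */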
static void
device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
              struct controlvm_message_header *msg_hdr, int response,
              bool need_response, bool for_visorbus)
{
        struct visorchipset_busdev_notifiers *notifiers;
        bool notified = false;
        struct visorchipset_device_info *dev_info =
                device_find(&dev_info_list, bus_no, dev_no);
        char *envp[] = {
                "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
                NULL
        };

        if (!dev_info)
                return;

        notifiers = &busdev_notifiers;

        if (need_response) {
                memcpy(&dev_info->pending_msg_hdr, msg_hdr,
                       sizeof(struct controlvm_message_header));
        } else {
                dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
        }

        down(&notifier_lock);
        if (response >= 0) {
                switch (cmd) {
                case CONTROLVM_DEVICE_CREATE:
                        if (notifiers->device_create) {
                                (*notifiers->device_create) (bus_no, dev_no);
                                notified = true;
                        }
                        break;
                case CONTROLVM_DEVICE_CHANGESTATE:
                        /* ServerReady / ServerRunning / SegmentStateRunning */
                        if (state.alive == segment_state_running.alive &&
                            state.operating ==
                                segment_state_running.operating) {
                                if (notifiers->device_resume) {
                                        (*notifiers->device_resume) (bus_no,
                                                                     dev_no);
                                        notified = true;
                                }
                        }
                        /* ServerNotReady / ServerLost / SegmentStateStandby */
                        else if (state.alive == segment_state_standby.alive &&
                                 state.operating ==
                                 segment_state_standby.operating) {
                                /* technically this is standby case
                                 * where server is lost
                                 */
                                if (notifiers->device_pause) {
                                        (*notifiers->device_pause) (bus_no,
                                                                    dev_no);
                                        notified = true;
                                }
                        } else if (state.alive == segment_state_paused.alive &&
                                   state.operating ==
                                   segment_state_paused.operating) {
                                /* this is lite pause where channel is
                                 * still valid, just a 'pause' of it
                                 */
                                if (bus_no == g_diagpool_bus_no &&
                                    dev_no == g_diagpool_dev_no) {
                                        /* this will trigger the
                                         * diag_shutdown.sh script in
                                         * the visorchipset hotplug
                                         */
                                        kobject_uevent_env(
                                            &visorchipset_platform_device.dev.kobj,
                                            KOBJ_ONLINE, envp);
                                }
                        }
                        break;
                case CONTROLVM_DEVICE_DESTROY:
                        if (notifiers->device_destroy) {
                                (*notifiers->device_destroy) (bus_no, dev_no);
                                notified = true;
                        }
                        break;
                }
        }
        /* If a notifier was called above, it is responsible for calling
         * the appropriate visorchipset_busdev_responders function, which
         * will call device_responder(); otherwise respond right here.
         */
        if (!notified)
                device_responder(cmd, bus_no, dev_no, response);
        up(&notifier_lock);
}

static void
bus_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->create_bus.bus_no;
        int rc = CONTROLVM_RESP_SUCCESS;
        struct visorchipset_bus_info *bus_info;

        bus_info = bus_find(&bus_info_list, bus_no);
        if (bus_info && (bus_info->state.created == 1)) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto cleanup;
        }
        bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
        if (!bus_info) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                goto cleanup;
        }

        INIT_LIST_HEAD(&bus_info->entry);
        bus_info->bus_no = bus_no;

        POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

        if (inmsg->hdr.flags.test_message == 1)
                bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
        else
                bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;

        bus_info->flags.server = inmsg->hdr.flags.server;
        bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
        bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
        bus_info->chan_info.channel_type_uuid =
                        cmd->create_bus.bus_data_type_uuid;
        bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;

        list_add(&bus_info->entry, &bus_info_list);

        POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
        bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->destroy_bus.bus_no;
        struct visorchipset_bus_info *bus_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        bus_info = bus_find(&bus_info_list, bus_no);
        if (!bus_info)
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        else if (bus_info->state.created == 0)
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

        bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_configure(struct controlvm_message *inmsg,
              struct parser_context *parser_ctx)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no;
        struct visorchipset_bus_info *bus_info;
        int rc = CONTROLVM_RESP_SUCCESS;
        char s[99];

        bus_no = cmd->configure_bus.bus_no;
        POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
                         POSTCODE_SEVERITY_INFO);

        bus_info = bus_find(&bus_info_list, bus_no);
        if (!bus_info) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        } else if (bus_info->state.created == 0) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        } else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
        } else {
                bus_info->partition_handle = cmd->configure_bus.guest_handle;
                bus_info->partition_uuid = parser_id_get(parser_ctx);
                parser_param_start(parser_ctx, PARSERSTRING_NAME);
                bus_info->name = parser_string_get(parser_ctx);

                visorchannel_uuid_id(&bus_info->partition_uuid, s);
                POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
                                 POSTCODE_SEVERITY_INFO);
        }
        bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
my_device_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->create_device.bus_no;
        u32 dev_no = cmd->create_device.dev_no;
        struct visorchipset_device_info *dev_info;
        struct visorchipset_bus_info *bus_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        dev_info = device_find(&dev_info_list, bus_no, dev_no);
        if (dev_info && (dev_info->state.created == 1)) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto cleanup;
        }
        bus_info = bus_find(&bus_info_list, bus_no);
        if (!bus_info) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
                goto cleanup;
        }
        if (bus_info->state.created == 0) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
                goto cleanup;
        }
        dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
        if (!dev_info) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                goto cleanup;
        }

        INIT_LIST_HEAD(&dev_info->entry);
        dev_info->bus_no = bus_no;
        dev_info->dev_no = dev_no;
        dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
        POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
                         POSTCODE_SEVERITY_INFO);

        if (inmsg->hdr.flags.test_message == 1)
                dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
        else
                dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
        dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
        dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
        dev_info->chan_info.channel_type_uuid =
                        cmd->create_device.data_type_uuid;
        list_add(&dev_info->entry, &dev_info_list);
        POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
                         POSTCODE_SEVERITY_INFO);
cleanup:
        /* get the bus and devNo for DiagPool channel */
        if (dev_info &&
            is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
                g_diagpool_bus_no = bus_no;
                g_diagpool_dev_no = dev_no;
        }
        device_epilog(bus_no, dev_no, segment_state_running,
                      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
                      inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_changestate(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->device_change_state.bus_no;
        u32 dev_no = cmd->device_change_state.dev_no;
        struct spar_segment_state state = cmd->device_change_state.state;
        struct visorchipset_device_info *dev_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        dev_info = device_find(&dev_info_list, bus_no, dev_no);
        if (!dev_info) {
                POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        } else if (dev_info->state.created == 0) {
                POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        }
        if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
                device_epilog(bus_no, dev_no, state,
                              CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
                              inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->destroy_device.bus_no;
        u32 dev_no = cmd->destroy_device.dev_no;
        struct visorchipset_device_info *dev_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        dev_info = device_find(&dev_info_list, bus_no, dev_no);
        if (!dev_info)
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        else if (dev_info->state.created == 0)
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

        if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
                device_epilog(bus_no, dev_no, segment_state_running,
                              CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
                              inmsg->hdr.flags.response_expected == 1, 1);
}

1414 /* When provided with the physical address of the controlvm channel
1415  * (phys_addr), the offset to the payload area we need to manage
1416  * (offset), and the size of this payload area (bytes), fills in the
1417  * controlvm_payload_info struct.  Returns true for success or false
1418  * for failure.
1419  */
1420 static int
1421 initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
1422                                   struct visor_controlvm_payload_info *info)
1423 {
1424         u8 __iomem *payload = NULL;
1425         int rc = CONTROLVM_RESP_SUCCESS;
1426
1427         if (!info) {
1428                 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1429                 goto cleanup;
1430         }
1431         memset(info, 0, sizeof(struct visor_controlvm_payload_info));
1432         if ((offset == 0) || (bytes == 0)) {
1433                 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1434                 goto cleanup;
1435         }
1436         payload = ioremap_cache(phys_addr + offset, bytes);
1437         if (!payload) {
1438                 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1439                 goto cleanup;
1440         }
1441
1442         info->offset = offset;
1443         info->bytes = bytes;
1444         info->ptr = payload;
1445
cleanup:
        if (rc < 0 && payload)
                iounmap(payload);
        return rc;
1454 }
1455
1456 static void
1457 destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
1458 {
1459         if (info->ptr) {
1460                 iounmap(info->ptr);
1461                 info->ptr = NULL;
1462         }
1463         memset(info, 0, sizeof(struct visor_controlvm_payload_info));
1464 }
1465
1466 static void
1467 initialize_controlvm_payload(void)
1468 {
1469         u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
1470         u64 payload_offset = 0;
1471         u32 payload_bytes = 0;
1472
1473         if (visorchannel_read(controlvm_channel,
1474                               offsetof(struct spar_controlvm_channel_protocol,
1475                                        request_payload_offset),
1476                               &payload_offset, sizeof(payload_offset)) < 0) {
1477                 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1478                                  POSTCODE_SEVERITY_ERR);
1479                 return;
1480         }
1481         if (visorchannel_read(controlvm_channel,
1482                               offsetof(struct spar_controlvm_channel_protocol,
1483                                        request_payload_bytes),
1484                               &payload_bytes, sizeof(payload_bytes)) < 0) {
1485                 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1486                                  POSTCODE_SEVERITY_ERR);
1487                 return;
1488         }
1489         initialize_controlvm_payload_info(phys_addr,
1490                                           payload_offset, payload_bytes,
1491                                           &controlvm_payload_info);
1492 }
1493
1494 /*  Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1495  *  Returns CONTROLVM_RESP_xxx code.
1496  */
1497 static int
1498 visorchipset_chipset_ready(void)
1499 {
1500         kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
1501         return CONTROLVM_RESP_SUCCESS;
1502 }
1503
1504 static int
1505 visorchipset_chipset_selftest(void)
1506 {
1507         char env_selftest[20];
1508         char *envp[] = { env_selftest, NULL };
1509
1510         sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1511         kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1512                            envp);
1513         return CONTROLVM_RESP_SUCCESS;
1514 }
1515
1516 /*  Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1517  *  Returns CONTROLVM_RESP_xxx code.
1518  */
1519 static int
1520 visorchipset_chipset_notready(void)
1521 {
1522         kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1523         return CONTROLVM_RESP_SUCCESS;
1524 }
1525
1526 static void
1527 chipset_ready(struct controlvm_message_header *msg_hdr)
1528 {
1529         int rc = visorchipset_chipset_ready();
1530
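        /* CONTROLVM_RESP_* codes are positive; failures are reported
         * back to Command as the negated value
         */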
1531         if (rc != CONTROLVM_RESP_SUCCESS)
1532                 rc = -rc;
1533         if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
1534                 controlvm_respond(msg_hdr, rc);
1535         if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
1536                 /* Send CHIPSET_READY response when all modules have been loaded
1537                  * and disks mounted for the partition
1538                  */
1539                 g_chipset_msg_hdr = *msg_hdr;
1540         }
1541 }
1542
1543 static void
1544 chipset_selftest(struct controlvm_message_header *msg_hdr)
1545 {
1546         int rc = visorchipset_chipset_selftest();
1547
1548         if (rc != CONTROLVM_RESP_SUCCESS)
1549                 rc = -rc;
1550         if (msg_hdr->flags.response_expected)
1551                 controlvm_respond(msg_hdr, rc);
1552 }
1553
1554 static void
1555 chipset_notready(struct controlvm_message_header *msg_hdr)
1556 {
1557         int rc = visorchipset_chipset_notready();
1558
1559         if (rc != CONTROLVM_RESP_SUCCESS)
1560                 rc = -rc;
1561         if (msg_hdr->flags.response_expected)
1562                 controlvm_respond(msg_hdr, rc);
1563 }
1564
1565 /* This is your "one-stop" shop for grabbing the next message from the
1566  * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1567  */
1568 static bool
1569 read_controlvm_event(struct controlvm_message *msg)
1570 {
1571         if (visorchannel_signalremove(controlvm_channel,
1572                                       CONTROLVM_QUEUE_EVENT, msg)) {
1573                 /* got a message */
1574                 if (msg->hdr.flags.test_message == 1)
1575                         return false;
1576                 return true;
1577         }
1578         return false;
1579 }
1580
1581 /*
1582  * The general parahotplug flow works as follows.  The visorchipset
1583  * driver receives a DEVICE_CHANGESTATE message from Command
1584  * specifying a physical device to enable or disable.  The CONTROLVM
1585  * message handler calls parahotplug_process_message, which then adds
1586  * the message to a global list and kicks off a udev event which
1587  * causes a user level script to enable or disable the specified
 * device.  The udev script then writes the request ID back through the
 * parahotplug/deviceenabled or parahotplug/devicedisabled sysfs
 * attribute, which causes parahotplug_request_complete to be called;
 * at that point the matching CONTROLVM message is retrieved from the
 * list and responded to.
1592  */
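/*
 * Illustrative walk-through (values hypothetical): a disable request
 * with id 42 for bus_no 3 and dev_no 0x0b surfaces to udev as
 *   SPAR_PARAHOTPLUG=1 SPAR_PARAHOTPLUG_ID=42 SPAR_PARAHOTPLUG_STATE=0
 *   SPAR_PARAHOTPLUG_BUS=3 SPAR_PARAHOTPLUG_DEVICE=1
 *   SPAR_PARAHOTPLUG_FUNCTION=3
 * and is completed by the script writing the id back, e.g. (sysfs path
 * assumed):
 *   echo 42 > /sys/devices/platform/visorchipset/parahotplug/devicedisabled
 */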
1593
1594 #define PARAHOTPLUG_TIMEOUT_MS 2000
1595
1596 /*
1597  * Generate unique int to match an outstanding CONTROLVM message with a
1598  * udev script /proc response
1599  */
1600 static int
1601 parahotplug_next_id(void)
1602 {
1603         static atomic_t id = ATOMIC_INIT(0);
1604
1605         return atomic_inc_return(&id);
1606 }
1607
1608 /*
1609  * Returns the time (in jiffies) when a CONTROLVM message on the list
1610  * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1611  */
1612 static unsigned long
1613 parahotplug_next_expiration(void)
1614 {
1615         return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1616 }
1617
1618 /*
1619  * Create a parahotplug_request, which is basically a wrapper for a
1620  * CONTROLVM_MESSAGE that we can stick on a list
1621  */
1622 static struct parahotplug_request *
1623 parahotplug_request_create(struct controlvm_message *msg)
1624 {
1625         struct parahotplug_request *req;
1626
1627         req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
1628         if (!req)
1629                 return NULL;
1630
1631         req->id = parahotplug_next_id();
1632         req->expiration = parahotplug_next_expiration();
1633         req->msg = *msg;
1634
1635         return req;
1636 }
1637
1638 /*
1639  * Free a parahotplug_request.
1640  */
1641 static void
1642 parahotplug_request_destroy(struct parahotplug_request *req)
1643 {
1644         kfree(req);
1645 }
1646
1647 /*
1648  * Cause uevent to run the user level script to do the disable/enable
1649  * specified in (the CONTROLVM message in) the specified
1650  * parahotplug_request
1651  */
1652 static void
1653 parahotplug_request_kickoff(struct parahotplug_request *req)
1654 {
1655         struct controlvm_message_packet *cmd = &req->msg.cmd;
1656         char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1657             env_func[40];
1658         char *envp[] = {
1659                 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1660         };
1661
1662         sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1663         sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1664         sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1665                 cmd->device_change_state.state.active);
1666         sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1667                 cmd->device_change_state.bus_no);
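        /* dev_no packs a PCI slot/function pair: bits 3 and up hold the
         * device (slot) number, the low three bits the function
         */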
1668         sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1669                 cmd->device_change_state.dev_no >> 3);
1670         sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1671                 cmd->device_change_state.dev_no & 0x7);
1672
1673         kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1674                            envp);
1675 }
1676
1677 /*
1678  * Remove any request from the list that's been on there too long and
1679  * respond with an error.
1680  */
1681 static void
1682 parahotplug_process_list(void)
1683 {
1684         struct list_head *pos;
1685         struct list_head *tmp;
1686
1687         spin_lock(&parahotplug_request_list_lock);
1688
1689         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1690                 struct parahotplug_request *req =
1691                     list_entry(pos, struct parahotplug_request, list);
1692
1693                 if (!time_after_eq(jiffies, req->expiration))
1694                         continue;
1695
1696                 list_del(pos);
1697                 if (req->msg.hdr.flags.response_expected)
1698                         controlvm_respond_physdev_changestate(
1699                                 &req->msg.hdr,
1700                                 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1701                                 req->msg.cmd.device_change_state.state);
1702                 parahotplug_request_destroy(req);
1703         }
1704
1705         spin_unlock(&parahotplug_request_list_lock);
1706 }
1707
1708 /*
1709  * Called from the /proc handler, which means the user script has
1710  * finished the enable/disable.  Find the matching identifier, and
1711  * respond to the CONTROLVM message with success.
1712  */
1713 static int
1714 parahotplug_request_complete(int id, u16 active)
1715 {
1716         struct list_head *pos;
1717         struct list_head *tmp;
1718
1719         spin_lock(&parahotplug_request_list_lock);
1720
1721         /* Look for a request matching "id". */
1722         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1723                 struct parahotplug_request *req =
1724                     list_entry(pos, struct parahotplug_request, list);
1725                 if (req->id == id) {
1726                         /* Found a match.  Remove it from the list and
1727                          * respond.
1728                          */
1729                         list_del(pos);
1730                         spin_unlock(&parahotplug_request_list_lock);
1731                         req->msg.cmd.device_change_state.state.active = active;
1732                         if (req->msg.hdr.flags.response_expected)
1733                                 controlvm_respond_physdev_changestate(
1734                                         &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1735                                         req->msg.cmd.device_change_state.state);
1736                         parahotplug_request_destroy(req);
1737                         return 0;
1738                 }
1739         }
1740
1741         spin_unlock(&parahotplug_request_list_lock);
1742         return -1;
1743 }
1744
1745 /*
1746  * Enables or disables a PCI device by kicking off a udev script
1747  */
1748 static void
1749 parahotplug_process_message(struct controlvm_message *inmsg)
1750 {
1751         struct parahotplug_request *req;
1752
1753         req = parahotplug_request_create(inmsg);
1754
1755         if (!req)
1756                 return;
1757
1758         if (inmsg->cmd.device_change_state.state.active) {
                /* For enable messages, just respond with success
                 * right away.  This is a bit of a hack, but there are
                 * issues with the early enable messages we get (with
                 * either the udev script not detecting that the device
                 * is up, or not getting called at all).  Fortunately
                 * the messages that get lost don't matter anyway, as
                 * devices are automatically enabled at
                 * initialization.
                 */
1768                 parahotplug_request_kickoff(req);
1769                 controlvm_respond_physdev_changestate(&inmsg->hdr,
1770                         CONTROLVM_RESP_SUCCESS,
1771                         inmsg->cmd.device_change_state.state);
1772                 parahotplug_request_destroy(req);
1773         } else {
                /* For disable messages, add the request to the
                 * request list before kicking off the udev script.  It
                 * won't get responded to until the script has
                 * indicated it's done.
                 */
1779                 spin_lock(&parahotplug_request_list_lock);
1780                 list_add_tail(&req->list, &parahotplug_request_list);
1781                 spin_unlock(&parahotplug_request_list_lock);
1782
1783                 parahotplug_request_kickoff(req);
1784         }
1785 }
1786
1787 /* Process a controlvm message.
1788  * Return result:
1789  *    false - this function will return false only in the case where the
1790  *            controlvm message was NOT processed, but processing must be
1791  *            retried before reading the next controlvm message; a
1792  *            scenario where this can occur is when we need to throttle
1793  *            the allocation of memory in which to copy out controlvm
1794  *            payload data
1795  *    true  - processing of the controlvm message completed,
1796  *            either successfully or with an error.
1797  */
1798 static bool
1799 handle_command(struct controlvm_message inmsg, u64 channel_addr)
1800 {
1801         struct controlvm_message_packet *cmd = &inmsg.cmd;
1802         u64 parm_addr;
1803         u32 parm_bytes;
1804         struct parser_context *parser_ctx = NULL;
1805         bool local_addr;
1806         struct controlvm_message ackmsg;
1807
1808         /* create parsing context if necessary */
1809         local_addr = (inmsg.hdr.flags.test_message == 1);
1810         if (channel_addr == 0)
1811                 return true;
1812         parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1813         parm_bytes = inmsg.hdr.payload_bytes;
1814
1815         /* Parameter and channel addresses within test messages actually lie
1816          * within our OS-controlled memory.  We need to know that, because it
1817          * makes a difference in how we compute the virtual address.
1818          */
1819         if (parm_addr && parm_bytes) {
1820                 bool retry = false;
1821
1822                 parser_ctx =
1823                     parser_init_byte_stream(parm_addr, parm_bytes,
1824                                             local_addr, &retry);
1825                 if (!parser_ctx && retry)
1826                         return false;
1827         }
1828
1829         if (!local_addr) {
1830                 controlvm_init_response(&ackmsg, &inmsg.hdr,
1831                                         CONTROLVM_RESP_SUCCESS);
1832                 if (controlvm_channel)
1833                         visorchannel_signalinsert(controlvm_channel,
1834                                                   CONTROLVM_QUEUE_ACK,
1835                                                   &ackmsg);
1836         }
1837         switch (inmsg.hdr.id) {
1838         case CONTROLVM_CHIPSET_INIT:
1839                 chipset_init(&inmsg);
1840                 break;
1841         case CONTROLVM_BUS_CREATE:
1842                 bus_create(&inmsg);
1843                 break;
1844         case CONTROLVM_BUS_DESTROY:
1845                 bus_destroy(&inmsg);
1846                 break;
1847         case CONTROLVM_BUS_CONFIGURE:
1848                 bus_configure(&inmsg, parser_ctx);
1849                 break;
1850         case CONTROLVM_DEVICE_CREATE:
1851                 my_device_create(&inmsg);
1852                 break;
1853         case CONTROLVM_DEVICE_CHANGESTATE:
                if (cmd->device_change_state.flags.phys_device) {
                        parahotplug_process_message(&inmsg);
                } else {
                        /* save the hdr and cmd structures for later use
                         * when sending back the response to Command
                         */
                        my_device_changestate(&inmsg);
                        g_devicechangestate_packet = inmsg.cmd;
                }
                break;
1864         case CONTROLVM_DEVICE_DESTROY:
1865                 my_device_destroy(&inmsg);
1866                 break;
1867         case CONTROLVM_DEVICE_CONFIGURE:
                /* no op for now; just send a response indicating success */
1869                 if (inmsg.hdr.flags.response_expected)
1870                         controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1871                 break;
1872         case CONTROLVM_CHIPSET_READY:
1873                 chipset_ready(&inmsg.hdr);
1874                 break;
1875         case CONTROLVM_CHIPSET_SELFTEST:
1876                 chipset_selftest(&inmsg.hdr);
1877                 break;
1878         case CONTROLVM_CHIPSET_STOP:
1879                 chipset_notready(&inmsg.hdr);
1880                 break;
1881         default:
1882                 if (inmsg.hdr.flags.response_expected)
1883                         controlvm_respond(&inmsg.hdr,
1884                                 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1885                 break;
1886         }
1887
1888         if (parser_ctx) {
1889                 parser_done(parser_ctx);
1890                 parser_ctx = NULL;
1891         }
1892         return true;
1893 }
1894
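/* Ask the hypervisor for the physical address and size of the controlvm
 * channel.  A params struct on the stack receives the results; its
 * physical address is passed through the vmcall, and on success the
 * hypervisor has filled in the address and channel_bytes fields.
 */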
1895 static inline unsigned int
1896 issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
1897 {
1898         struct vmcall_io_controlvm_addr_params params;
1899         int result = VMCALL_SUCCESS;
1900         u64 physaddr;
1901
1902         physaddr = virt_to_phys(&params);
1903         ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
1904         if (VMCALL_SUCCESSFUL(result)) {
1905                 *control_addr = params.address;
1906                 *control_bytes = params.channel_bytes;
1907         }
1908         return result;
1909 }
1910
1911 static u64 controlvm_get_channel_address(void)
1912 {
1913         u64 addr = 0;
1914         u32 size = 0;
1915
1916         if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
1917                 return 0;
1918
1919         return addr;
1920 }
1921
1922 static void
1923 controlvm_periodic_work(struct work_struct *work)
1924 {
1925         struct controlvm_message inmsg;
1926         bool got_command = false;
1927         bool handle_command_failed = false;
1928         static u64 poll_count;
1929
1930         /* make sure visorbus server is registered for controlvm callbacks */
1931         if (visorchipset_visorbusregwait && !visorbusregistered)
1932                 goto cleanup;
1933
        /* skip processing until we have polled a number of times */
        poll_count++;
        if (poll_count < 250)
                goto cleanup;
1939
1940         /* Check events to determine if response to CHIPSET_READY
1941          * should be sent
1942          */
1943         if (visorchipset_holdchipsetready &&
1944             (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
1945                 if (check_chipset_events() == 1) {
1946                         controlvm_respond(&g_chipset_msg_hdr, 0);
1947                         clear_chipset_events();
1948                         memset(&g_chipset_msg_hdr, 0,
1949                                sizeof(struct controlvm_message_header));
1950                 }
1951         }
1952
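        /* drain and discard anything on the RESPONSE queue; this driver
         * takes no action on those messages
         */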
1953         while (visorchannel_signalremove(controlvm_channel,
1954                                          CONTROLVM_QUEUE_RESPONSE,
1955                                          &inmsg))
1956                 ;
1957         if (!got_command) {
1958                 if (controlvm_pending_msg_valid) {
                        /* we throttled processing of a prior
                         * msg, so try to process it again
                         * rather than reading a new one
                         */
1963                         inmsg = controlvm_pending_msg;
1964                         controlvm_pending_msg_valid = false;
1965                         got_command = true;
1966                 } else {
1967                         got_command = read_controlvm_event(&inmsg);
1968                 }
1969         }
1970
1971         handle_command_failed = false;
1972         while (got_command && (!handle_command_failed)) {
1973                 most_recent_message_jiffies = jiffies;
1974                 if (handle_command(inmsg,
1975                                    visorchannel_get_physaddr
1976                                    (controlvm_channel)))
1977                         got_command = read_controlvm_event(&inmsg);
1978                 else {
                        /* this is a scenario where throttling
                         * is required, but probably NOT an error; we
                         * stash the current controlvm msg so we will
                         * attempt to reprocess it on our next loop
                         */
1985                         handle_command_failed = true;
1986                         controlvm_pending_msg = inmsg;
1987                         controlvm_pending_msg_valid = true;
1988                 }
1989         }
1990
1991         /* parahotplug_worker */
1992         parahotplug_process_list();
1993
1994 cleanup:
1995
1996         if (time_after(jiffies,
1997                        most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
                /* it's been longer than MIN_IDLE_SECONDS since we
                 * processed our last controlvm message; slow down the
                 * polling
                 */
2002                 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
2003                         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2004         } else {
2005                 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
2006                         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2007         }
2008
2009         queue_delayed_work(periodic_controlvm_workqueue,
2010                            &periodic_controlvm_work, poll_jiffies);
2011 }
2012
2013 static void
2014 setup_crash_devices_work_queue(struct work_struct *work)
2015 {
2016         struct controlvm_message local_crash_bus_msg;
2017         struct controlvm_message local_crash_dev_msg;
2018         struct controlvm_message msg;
2019         u32 local_crash_msg_offset;
2020         u16 local_crash_msg_count;
2021
2022         /* make sure visorbus is registered for controlvm callbacks */
2023         if (visorchipset_visorbusregwait && !visorbusregistered)
2024                 goto cleanup;
2025
2026         POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
2027
2028         /* send init chipset msg */
2029         msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2030         msg.cmd.init_chipset.bus_count = 23;
2031         msg.cmd.init_chipset.switch_count = 0;
2032
2033         chipset_init(&msg);
2034
2035         /* get saved message count */
2036         if (visorchannel_read(controlvm_channel,
2037                               offsetof(struct spar_controlvm_channel_protocol,
2038                                        saved_crash_message_count),
2039                               &local_crash_msg_count, sizeof(u16)) < 0) {
2040                 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2041                                  POSTCODE_SEVERITY_ERR);
2042                 return;
2043         }
2044
2045         if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
2046                 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
2047                                  local_crash_msg_count,
2048                                  POSTCODE_SEVERITY_ERR);
2049                 return;
2050         }
2051
2052         /* get saved crash message offset */
2053         if (visorchannel_read(controlvm_channel,
2054                               offsetof(struct spar_controlvm_channel_protocol,
2055                                        saved_crash_message_offset),
2056                               &local_crash_msg_offset, sizeof(u32)) < 0) {
2057                 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2058                                  POSTCODE_SEVERITY_ERR);
2059                 return;
2060         }
2061
2062         /* read create device message for storage bus offset */
2063         if (visorchannel_read(controlvm_channel,
2064                               local_crash_msg_offset,
2065                               &local_crash_bus_msg,
2066                               sizeof(struct controlvm_message)) < 0) {
2067                 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
2068                                  POSTCODE_SEVERITY_ERR);
2069                 return;
2070         }
2071
2072         /* read create device message for storage device */
2073         if (visorchannel_read(controlvm_channel,
2074                               local_crash_msg_offset +
2075                               sizeof(struct controlvm_message),
2076                               &local_crash_dev_msg,
2077                               sizeof(struct controlvm_message)) < 0) {
2078                 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
2079                                  POSTCODE_SEVERITY_ERR);
2080                 return;
2081         }
2082
2083         /* reuse IOVM create bus message */
2084         if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
2085                 bus_create(&local_crash_bus_msg);
2086         } else {
2087                 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
2088                                  POSTCODE_SEVERITY_ERR);
2089                 return;
2090         }
2091
2092         /* reuse create device message for storage device */
2093         if (local_crash_dev_msg.cmd.create_device.channel_addr) {
2094                 my_device_create(&local_crash_dev_msg);
2095         } else {
2096                 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
2097                                  POSTCODE_SEVERITY_ERR);
2098                 return;
2099         }
2100         POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
2101         return;
2102
2103 cleanup:
2104
2105         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2106
2107         queue_delayed_work(periodic_controlvm_workqueue,
2108                            &periodic_controlvm_work, poll_jiffies);
2109 }
2110
2111 static void
2112 bus_create_response(struct visorchipset_bus_info *bus_info, int response)
2113 {
2114         bus_responder(CONTROLVM_BUS_CREATE, bus_info, response);
2115 }
2116
2117 static void
2118 bus_destroy_response(struct visorchipset_bus_info *bus_info, int response)
2119 {
2120         bus_responder(CONTROLVM_BUS_DESTROY, bus_info, response);
2121 }
2122
2123 static void
2124 device_create_response(u32 bus_no, u32 dev_no, int response)
2125 {
2126         device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
2127 }
2128
2129 static void
2130 device_destroy_response(u32 bus_no, u32 dev_no, int response)
2131 {
2132         device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
2133 }
2134
2135 static void
2136 visorchipset_device_pause_response(u32 bus_no, u32 dev_no, int response)
2137 {
2138         device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2139                                      bus_no, dev_no, response,
2140                                      segment_state_standby);
2141 }
2142
2143 static void
2144 device_resume_response(u32 bus_no, u32 dev_no, int response)
2145 {
2146         device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2147                                      bus_no, dev_no, response,
2148                                      segment_state_running);
2149 }
2150
2151 bool
2152 visorchipset_get_bus_info(u32 bus_no, struct visorchipset_bus_info *bus_info)
2153 {
2154         void *p = bus_find(&bus_info_list, bus_no);
2155
2156         if (!p)
2157                 return false;
2158         memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2159         return true;
2160 }
2161 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2162
2163 bool
2164 visorchipset_set_bus_context(struct visorchipset_bus_info *p, void *context)
2165 {
2166         if (!p)
2167                 return false;
2168         p->bus_driver_context = context;
2169         return true;
2170 }
2171 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2172
2173 bool
2174 visorchipset_get_device_info(u32 bus_no, u32 dev_no,
2175                              struct visorchipset_device_info *dev_info)
2176 {
2177         void *p = device_find(&dev_info_list, bus_no, dev_no);
2178
2179         if (!p)
2180                 return false;
2181         memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2182         return true;
2183 }
2184 EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2185
2186 bool
2187 visorchipset_set_device_context(u32 bus_no, u32 dev_no, void *context)
2188 {
2189         struct visorchipset_device_info *p;
2190
2191         p = device_find(&dev_info_list, bus_no, dev_no);
2192
2193         if (!p)
2194                 return false;
2195         p->bus_driver_context = context;
2196         return true;
2197 }
2198 EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2199
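/* The guest/chipsetready sysfs interface is written by guest
 * initialization scripts to report boot progress, e.g. (sysfs path
 * assumed):
 *   echo MODULES_LOADED > /sys/devices/platform/visorchipset/guest/chipsetready
 * The two recognized strings set the chipset_events[] flags that gate
 * the deferred CHIPSET_READY response (see controlvm_periodic_work()).
 */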
2200 static ssize_t chipsetready_store(struct device *dev,
2201                                   struct device_attribute *attr,
2202                                   const char *buf, size_t count)
2203 {
2204         char msgtype[64];
2205
2206         if (sscanf(buf, "%63s", msgtype) != 1)
2207                 return -EINVAL;
2208
2209         if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
2210                 chipset_events[0] = 1;
2211                 return count;
2212         } else if (!strcmp(msgtype, "MODULES_LOADED")) {
2213                 chipset_events[1] = 1;
2214                 return count;
2215         }
2216         return -EINVAL;
2217 }
2218
2219 /* The parahotplug/devicedisabled interface gets called by our support script
2220  * when an SR-IOV device has been shut down. The ID is passed to the script
2221  * and then passed back when the device has been removed.
2222  */
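/* Example completion from the script side (sysfs path assumed):
 *   echo 42 > /sys/devices/platform/visorchipset/parahotplug/devicedisabled
 */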
2223 static ssize_t devicedisabled_store(struct device *dev,
2224                                     struct device_attribute *attr,
2225                                     const char *buf, size_t count)
2226 {
2227         unsigned int id;
2228
2229         if (kstrtouint(buf, 10, &id))
2230                 return -EINVAL;
2231
2232         parahotplug_request_complete(id, 0);
2233         return count;
2234 }
2235
2236 /* The parahotplug/deviceenabled interface gets called by our support script
2237  * when an SR-IOV device has been recovered. The ID is passed to the script
2238  * and then passed back when the device has been brought back up.
2239  */
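/* Example completion from the script side (sysfs path assumed):
 *   echo 42 > /sys/devices/platform/visorchipset/parahotplug/deviceenabled
 */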
2240 static ssize_t deviceenabled_store(struct device *dev,
2241                                    struct device_attribute *attr,
2242                                    const char *buf, size_t count)
2243 {
2244         unsigned int id;
2245
2246         if (kstrtouint(buf, 10, &id))
2247                 return -EINVAL;
2248
2249         parahotplug_request_complete(id, 1);
2250         return count;
2251 }
2252
2253 static int
2254 visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
2255 {
2256         unsigned long physaddr = 0;
2257         unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
2258         u64 addr = 0;
2259
2261         if (offset & (PAGE_SIZE - 1))
2262                 return -ENXIO;  /* need aligned offsets */
2263
2264         switch (offset) {
2265         case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
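                /* map the control channel whose physical address is
                 * published in the controlvm channel's
                 * gp_control_channel field
                 */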
2266                 vma->vm_flags |= VM_IO;
2267                 if (!*file_controlvm_channel)
2268                         return -ENXIO;
2269
2270                 visorchannel_read(*file_controlvm_channel,
2271                         offsetof(struct spar_controlvm_channel_protocol,
2272                                  gp_control_channel),
2273                         &addr, sizeof(addr));
2274                 if (!addr)
2275                         return -ENXIO;
2276
2277                 physaddr = (unsigned long)addr;
2278                 if (remap_pfn_range(vma, vma->vm_start,
2279                                     physaddr >> PAGE_SHIFT,
2280                                     vma->vm_end - vma->vm_start,
2281                                     /*pgprot_noncached */
2282                                     (vma->vm_page_prot))) {
2283                         return -EAGAIN;
2284                 }
2285                 break;
2286         default:
2287                 return -ENXIO;
2288         }
2289         return 0;
2290 }
2291
2292 static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
2293 {
        s64 result = VMCALL_SUCCESS;
2295         u64 physaddr = 0;
2296
2297         ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
2298                         result);
2299         return result;
2300 }
2301
2302 static inline int issue_vmcall_update_physical_time(u64 adjustment)
2303 {
2304         int result = VMCALL_SUCCESS;
2305
2306         ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
2307         return result;
2308 }
2309
2310 static long visorchipset_ioctl(struct file *file, unsigned int cmd,
2311                                unsigned long arg)
2312 {
2313         s64 adjustment;
2314         s64 vrtc_offset;
2315
2316         switch (cmd) {
2317         case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
2318                 /* get the physical rtc offset */
2319                 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
2320                 if (copy_to_user((void __user *)arg, &vrtc_offset,
2321                                  sizeof(vrtc_offset))) {
2322                         return -EFAULT;
2323                 }
2324                 return 0;
2325         case VMCALL_UPDATE_PHYSICAL_TIME:
2326                 if (copy_from_user(&adjustment, (void __user *)arg,
2327                                    sizeof(adjustment))) {
2328                         return -EFAULT;
2329                 }
2330                 return issue_vmcall_update_physical_time(adjustment);
2331         default:
                return -ENOTTY; /* conventional errno for unknown ioctls */
2333         }
2334 }
2335
static const struct file_operations visorchipset_fops = {
        .owner = THIS_MODULE,
        .open = visorchipset_open,
        .unlocked_ioctl = visorchipset_ioctl,
        .release = visorchipset_release,
        .mmap = visorchipset_mmap,
};
2345
2346 static int
2347 visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
2348 {
2349         int rc = 0;
2350
2351         file_controlvm_channel = controlvm_channel;
2352         cdev_init(&file_cdev, &visorchipset_fops);
2353         file_cdev.owner = THIS_MODULE;
        if (MAJOR(major_dev) == 0) {
                /* dynamic major device number registration required */
                rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
                if (rc < 0)
                        return rc;
2359         } else {
2360                 /* static major device number registration required */
2361                 rc = register_chrdev_region(major_dev, 1, "visorchipset");
2362                 if (rc < 0)
2363                         return rc;
2364         }
2365         rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
2366         if (rc < 0) {
2367                 unregister_chrdev_region(major_dev, 1);
2368                 return rc;
2369         }
2370         return 0;
2371 }
2372
2373 static int
2374 visorchipset_init(struct acpi_device *acpi_device)
2375 {
2376         int rc = 0;
2377         u64 addr;
2378         int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
2379         uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
2380
2381         addr = controlvm_get_channel_address();
2382         if (!addr)
2383                 return -ENODEV;
2384
2385         memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
2386         memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
2387
        controlvm_channel = visorchannel_create_with_lock(addr, tmp_sz,
                                                          GFP_KERNEL, uuid);
        if (!controlvm_channel)
                return -ENODEV;
        if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
                    visorchannel_get_header(controlvm_channel))) {
2392                 initialize_controlvm_payload();
2393         } else {
2394                 visorchannel_destroy(controlvm_channel);
2395                 controlvm_channel = NULL;
2396                 return -ENODEV;
2397         }
2398
2399         major_dev = MKDEV(visorchipset_major, 0);
2400         rc = visorchipset_file_init(major_dev, &controlvm_channel);
2401         if (rc < 0) {
2402                 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2403                 goto cleanup;
2404         }
2405
2406         memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2407
2408         /* if booting in a crash kernel */
2409         if (is_kdump_kernel())
2410                 INIT_DELAYED_WORK(&periodic_controlvm_work,
2411                                   setup_crash_devices_work_queue);
2412         else
2413                 INIT_DELAYED_WORK(&periodic_controlvm_work,
2414                                   controlvm_periodic_work);
2415         periodic_controlvm_workqueue =
2416             create_singlethread_workqueue("visorchipset_controlvm");
2417
2418         if (!periodic_controlvm_workqueue) {
2419                 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2420                                  DIAG_SEVERITY_ERR);
2421                 rc = -ENOMEM;
2422                 goto cleanup;
2423         }
2424         most_recent_message_jiffies = jiffies;
2425         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
        /* queue_delayed_work() returns bool, not a negative errno, and
         * this work item cannot already be pending here, so there is
         * nothing useful to check
         */
        queue_delayed_work(periodic_controlvm_workqueue,
                           &periodic_controlvm_work, poll_jiffies);
2433
2434         visorchipset_platform_device.dev.devt = major_dev;
2435         if (platform_device_register(&visorchipset_platform_device) < 0) {
2436                 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
                rc = -ENODEV;
2438                 goto cleanup;
2439         }
2440         POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2441
2442         rc = visorbus_init();
2443 cleanup:
2444         if (rc) {
2445                 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2446                                  POSTCODE_SEVERITY_ERR);
2447         }
2448         return rc;
2449 }
2450
2451 static void
2452 visorchipset_file_cleanup(dev_t major_dev)
2453 {
2454         if (file_cdev.ops)
2455                 cdev_del(&file_cdev);
2456         file_cdev.ops = NULL;
2457         unregister_chrdev_region(major_dev, 1);
2458 }
2459
2460 static int
2461 visorchipset_exit(struct acpi_device *acpi_device)
2462 {
2463         POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2464
2465         visorbus_exit();
2466
2467         cancel_delayed_work(&periodic_controlvm_work);
2468         flush_workqueue(periodic_controlvm_workqueue);
2469         destroy_workqueue(periodic_controlvm_workqueue);
2470         periodic_controlvm_workqueue = NULL;
2471         destroy_controlvm_payload_info(&controlvm_payload_info);
2472
2473         cleanup_controlvm_structures();
2474
2475         memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2476
2477         visorchannel_destroy(controlvm_channel);
2478
2479         visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
2480         POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2481
2482         return 0;
2483 }
2484
2485 static const struct acpi_device_id unisys_device_ids[] = {
2486         {"PNP0A07", 0},
2487         {"", 0},
2488 };
2489
2490 static struct acpi_driver unisys_acpi_driver = {
2491         .name = "unisys_acpi",
2492         .class = "unisys_acpi_class",
2493         .owner = THIS_MODULE,
2494         .ids = unisys_device_ids,
2495         .ops = {
2496                 .add = visorchipset_init,
2497                 .remove = visorchipset_exit,
2498                 },
2499 };
static u32 __init visorutil_spar_detect(void)
2501 {
2502         unsigned int eax, ebx, ecx, edx;
2503
2504         if (cpu_has_hypervisor) {
2505                 /* check the ID */
2506                 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
2507                 return  (ebx == UNISYS_SPAR_ID_EBX) &&
2508                         (ecx == UNISYS_SPAR_ID_ECX) &&
2509                         (edx == UNISYS_SPAR_ID_EDX);
2510         } else {
2511                 return 0;
2512         }
2513 }
2514
2515 static int init_unisys(void)
2516 {
        int result;

        if (!visorutil_spar_detect())
2519                 return -ENODEV;
2520
2521         result = acpi_bus_register_driver(&unisys_acpi_driver);
2522         if (result)
2523                 return -ENODEV;
2524
2525         pr_info("Unisys Visorchipset Driver Loaded.\n");
2526         return 0;
}
2528
2529 static void exit_unisys(void)
2530 {
2531         acpi_bus_unregister_driver(&unisys_acpi_driver);
2532 }
2533
2534 module_param_named(major, visorchipset_major, int, S_IRUGO);
MODULE_PARM_DESC(major,
                 "major device number to use for the device node");
module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
MODULE_PARM_DESC(visorbusregwait,
                 "1 to have the module wait for the visor bus to register");
module_param_named(holdchipsetready, visorchipset_holdchipsetready,
                   int, S_IRUGO);
MODULE_PARM_DESC(holdchipsetready,
                 "1 to hold response to CHIPSET_READY");
2544
2545 module_init(init_unisys);
2546 module_exit(exit_unisys);
2547
2548 MODULE_AUTHOR("Unisys");
2549 MODULE_LICENSE("GPL");
2550 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2551                    VERSION);
2552 MODULE_VERSION(VERSION);