/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/random.h>
#include "hyperv_vmbus.h"
static struct acpi_device *hv_acpi_dev;

static struct tasklet_struct msg_dpc;
static struct completion probe_event;
static int irq;
static void hyperv_report_panic(struct pt_regs *regs)
{
	static bool panic_reported;

	/*
	 * We prefer to report panic on 'die' chain as we have proper
	 * registers to report, but if we miss it (e.g. on BUG()) we need
	 * to report it on 'panic'.
	 */
	if (panic_reported)
		return;
	panic_reported = true;

	wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);

	/*
	 * Let Hyper-V know there is crash data available
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}
static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
			      void *args)
{
	hyperv_report_panic(current_pt_regs());
	return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
			    void *args)
{
	struct die_args *die = (struct die_args *)args;

	hyperv_report_panic(die->regs);
	return NOTIFY_DONE;
}
static struct notifier_block hyperv_die_block = {
	.notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};
struct resource *hyperv_mmio;

static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;
	return 0;
}

#define VMBUS_ALIAS_LEN ((sizeof(((struct hv_vmbus_device_id *)0)->guid)) * 2)
static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
{
	int i;

	for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
		sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}
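/*
 * Illustrative example (made-up GUID): VMBUS_ALIAS_LEN is 2 * 16 = 32
 * characters, so for a dev_type whose uuid_le byte array starts
 * dd cc bb aa ff ee ..., print_alias_name() fills alias_name with the
 * 32-character string "ddccbbaaffee..." with no separators.
 */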
static u8 channel_monitor_group(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}
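/*
 * Worked example of the mapping above: monitorid 69 falls in trigger
 * group 69 / 32 = 2 at bit offset 69 % 32 = 5; the monitor-page lookups
 * below index their arrays with that same (group, offset) pair.
 */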
static u32 channel_pending(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);
	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);
	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}
static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);
static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_type.b);
}
static DEVICE_ATTR_RO(class_id);
static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_instance.b);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(hv_dev, alias_name);
	return sprintf(buf, "vmbus:%s\n", alias_name);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);
static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);
static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);
static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);
static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);
static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);
static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);
static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	unsigned long flags;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
			       channel->offermsg.child_relid,
			       channel->target_cpu);

	spin_lock_irqsave(&channel->lock, flags);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				      buf_size - tot_written,
				      "%u:%u\n",
				      cur_sc->offermsg.child_relid,
				      cur_sc->target_cpu);
		tot_written += n_written;
	}

	spin_unlock_irqrestore(&channel->lock, flags);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
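/*
 * Example of the resulting sysfs output (values are illustrative):
 * reading channel_vp_mapping for a device with two subchannels might
 * return
 *	14:0
 *	15:1
 *	16:2
 * i.e. one "child_relid:target_cpu" pair per line, primary channel first.
 */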
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus);
/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in the userspace. The udev will then look at its
 * rule and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	int ret;
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(dev, alias_name);
	ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
	return ret;
}
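/*
 * For illustration (made-up GUID): if alias_name above came out as
 * "ddccbbaaffee11002233445566778899", the generated uevent carries
 *	MODALIAS=vmbus:ddccbbaaffee11002233445566778899
 * which udev/modprobe match against the vmbus:* aliases built from each
 * driver's hv_vmbus_device_id table.
 */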
static const uuid_le null_guid;

static inline bool is_null_guid(const __u8 *guid)
{
	if (memcmp(guid, &null_guid, sizeof(uuid_le)))
		return false;
	return true;
}

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(
					const struct hv_vmbus_device_id *id,
					const __u8 *guid)
{
	for (; !is_null_guid(id->guid); id++)
		if (!memcmp(&id->guid, guid, sizeof(uuid_le)))
			return id;
	return NULL;
}
/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	if (hv_vmbus_get_id(drv->id_table, hv_dev->dev_type.b))
		return 1;
	return 0;
}
/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv->id_table, dev->dev_type.b);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);
	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}
/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}
	return 0;
}
/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);
	if (drv->shutdown)
		drv->shutdown(dev);
}
/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	hv_process_channel_removal(channel,
				   channel->offermsg.child_relid);
	kfree(hv_dev);
}
/* The one and only one */
static struct bus_type hv_bus = {
	.name =		"vmbus",
	.match =		vmbus_match,
	.shutdown =		vmbus_shutdown,
	.remove =		vmbus_remove,
	.probe =		vmbus_probe,
	.uevent =		vmbus_uevent,
	.dev_groups =		vmbus_groups,
};
struct onmessage_work_context {
	struct work_struct work;
	struct hv_message msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage(&ctx->msg);
	kfree(ctx);
}
static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
{
	struct clock_event_device *dev = hv_context.clk_evt[cpu];

	if (dev->event_handler)
		dev->event_handler(dev);

	msg->header.message_type = HVMSG_NONE;

	/*
	 * Make sure the write to MessageType (ie set to
	 * HVMSG_NONE) happens before we read the
	 * MessagePending and EOMing. Otherwise, the EOMing
	 * will not deliver any more messages since there is
	 * no empty slot.
	 */
	mb();

	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor.
		 */
		wrmsrl(HV_X64_MSR_EOM, 0);
	}
}
static void vmbus_on_msg_dpc(unsigned long data)
{
	int cpu = smp_processor_id();
	void *page_addr = hv_context.synic_message_page[cpu];
	struct hv_message *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;

	if (msg->header.message_type == HVMSG_NONE)
		/* no msg */
		return;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
		goto msg_handled;
	}

	entry = &channel_message_table[hdr->msgtype];
	if (entry->handler_type == VMHT_BLOCKING) {
		ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
		if (ctx == NULL)
			return;

		INIT_WORK(&ctx->work, vmbus_onmessage_work);
		memcpy(&ctx->msg, msg, sizeof(*msg));

		queue_work(vmbus_connection.work_queue, &ctx->work);
	} else
		entry->message_handler(hdr);

msg_handled:
	msg->header.message_type = HVMSG_NONE;

	/*
	 * Make sure the write to MessageType (ie set to
	 * HVMSG_NONE) happens before we read the
	 * MessagePending and EOMing. Otherwise, the EOMing
	 * will not deliver any more messages since there is
	 * no empty slot.
	 */
	mb();

	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor.
		 */
		wrmsrl(HV_X64_MSR_EOM, 0);
	}
}
static void vmbus_isr(void)
{
	int cpu = smp_processor_id();
	void *page_addr;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	page_addr = hv_context.synic_event_page[cpu];
	if (page_addr == NULL)
		return;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */
	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7)) {
		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0,
			(unsigned long *) &event->flags32[0])) {
			handled = true;
		}
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		tasklet_schedule(hv_context.event_dpc[cpu]);

	page_addr = hv_context.synic_message_page[cpu];
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
			hv_process_timer_expiration(msg, cpu);
		else
			tasklet_schedule(&msg_dpc);
	}

	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
}
/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- get the irq resource
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(int irq)
{
	int ret;

	/* Hypervisor initialization...setup hypercall page..etc */
	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);

	ret = bus_register(&hv_bus);
	if (ret)
		return ret;

	hv_setup_vmbus_irq(vmbus_isr);

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;
	/*
	 * Initialize the per-cpu interrupt state and
	 * connect to the host.
	 */
	on_each_cpu(hv_synic_init, NULL, 1);
	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	if (vmbus_proto_version > VERSION_WIN7)
		cpu_hotplug_disable();

	/*
	 * Only register if the crash MSRs are available
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		register_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &hyperv_panic_block);
	}

	vmbus_request_offers();
	return 0;

err_connect:
	on_each_cpu(hv_synic_cleanup, NULL, 1);
err_alloc:
	hv_synic_free();
	hv_remove_vmbus_irq();
	bus_unregister(&hv_bus);
	return ret;
}
/**
 * __vmbus_driver_register() - Register a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
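/*
 * Hedged usage sketch (driver names are hypothetical, not part of this
 * file): child drivers normally go through the vmbus_driver_register()
 * wrapper in <linux/hyperv.h>, which supplies THIS_MODULE/KBUILD_MODNAME:
 *
 *	static struct hv_driver sample_drv = {
 *		.name = "sample_vmbus_drv",
 *		.id_table = sample_id_table,	// ends with an empty GUID entry
 *		.probe = sample_probe,
 *		.remove = sample_remove,
 *	};
 *
 *	ret = vmbus_driver_register(&sample_drv);
 */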
/**
 * vmbus_driver_unregister() - Unregister a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previous registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists())
		driver_unregister(&hv_driver->driver);
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const uuid_le *type,
				      const uuid_le *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
	memcpy(&child_device_obj->dev_instance, instance,
	       sizeof(uuid_le));

	return child_device_obj;
}
/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	int ret = 0;

	dev_set_name(&child_device_obj->device, "vmbus_%d",
		     child_device_obj->channel->id);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);

	if (ret)
		pr_err("Unable to register child device\n");
	else
		pr_debug("child device %s registered\n",
			 dev_name(&child_device_obj->device));

	return ret;
}
/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}
/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;

	switch (res->type) {
	case ACPI_RESOURCE_TYPE_IRQ:
		irq = res->data.irq.interrupts[0];
		return AE_OK;

	/*
	 * "Address" descriptors are for bus windows. Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	default:
		/* Unused resource type */
		return AE_OK;
	}
	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it. */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if ((*old_res)->end < new_res->start) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}
static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
		next_res = cur_res->sibling;
		kfree(cur_res);
	}
	return 0;
}
/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplied a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			allocation
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge" whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case.  It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * the caller.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter;
	resource_size_t range_min, range_max, start, local_min, local_max;
	const char *dev_n = dev_name(&device_obj->device);
	u32 fb_end = screen_info.lfb_base + (screen_info.lfb_size << 1);
	int i;

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;

		/* If this range overlaps the frame buffer, split it into
		   two tries. */
		for (i = 0; i < 2; i++) {
			local_min = range_min;
			local_max = range_max;
			if (fb_overlap_ok || (range_min >= fb_end) ||
			    (range_max <= screen_info.lfb_base)) {
				i++;
			} else {
				if ((range_min <= screen_info.lfb_base) &&
				    (range_max >= screen_info.lfb_base)) {
					/*
					 * The frame buffer is in this window,
					 * so trim this into the part that
					 * precedes the frame buffer.
					 */
					local_max = screen_info.lfb_base - 1;
					range_min = fb_end;
				} else {
					range_min = fb_end;
					continue;
				}
			}

			start = (local_min + align - 1) & ~(align - 1);
			for (; start + size - 1 <= local_max; start += align) {
				*new = request_mem_region_exclusive(start, size,
								    dev_n);
				if (*new)
					return 0;
			}
		}
	}

	return -ENXIO;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
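/*
 * Hedged usage sketch (hypothetical caller, not part of this file): a
 * VMBus child driver could reserve an 8 MB window aligned to 1 MB,
 * anywhere in the address space, with something like:
 *
 *	struct resource *mmio;
 *	int ret = vmbus_allocate_mmio(&mmio, hdev, 0, -1,
 *				      0x800000, 0x100000, true);
 *	if (ret)
 *		return ret;
 *	// mmio->start .. mmio->end now belongs to this device
 */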
static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
	 * firmware) is the VMOD that has the mmio ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio)
			break;
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}

static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
};
static void hv_kexec_handler(void)
{
	int cpu;

	hv_synic_clockevents_cleanup();
	vmbus_initiate_unload();
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
	hv_cleanup();
}

static void hv_crash_handler(struct pt_regs *regs)
{
	vmbus_initiate_unload();
	/*
	 * In crash handler we can't schedule synic cleanup for all CPUs,
	 * doing the cleanup for current CPU only. This should be sufficient
	 * for kdump.
	 */
	hv_synic_cleanup(NULL);
	hv_cleanup();
}
static int __init hv_acpi_init(void)
{
	int ret, t;

	if (x86_hyper != &x86_hyper_ms_hyperv)
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get irq resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);
	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (irq <= 0) {
		ret = -ENODEV;
		goto cleanup;
	}

	ret = vmbus_bus_init(irq);
	if (ret)
		goto cleanup;

	hv_setup_kexec_handler(hv_kexec_handler);
	hv_setup_crash_handler(hv_crash_handler);
	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}
static void __exit vmbus_exit(void)
{
	int cpu;

	hv_remove_kexec_handler();
	hv_remove_crash_handler();
	vmbus_connection.conn_state = DISCONNECTED;
	hv_synic_clockevents_cleanup();
	vmbus_disconnect();
	hv_remove_vmbus_irq();
	tasklet_kill(&msg_dpc);
	vmbus_free_channels();
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		unregister_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &hyperv_panic_block);
	}
	bus_unregister(&hv_bus);
	hv_cleanup();
	for_each_online_cpu(cpu) {
		tasklet_kill(hv_context.event_dpc[cpu]);
		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
	}
	hv_synic_free();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	if (vmbus_proto_version > VERSION_WIN7)
		cpu_hotplug_enable();
}
MODULE_LICENSE("GPL");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);