/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>

#include "hyperv_vmbus.h"

#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))
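/*
 * Illustrative arithmetic (editor's example, not from the original file):
 * with 4 KiB pages, a range at addr = 0x1800 with len = 0x2000 ends at
 * 0x3800, which PAGE_ALIGNs to 0x4000, so NUM_PAGES_SPANNED yields
 * (0x4000 >> 12) - (0x1800 >> 12) = 4 - 1 = 3 pages, even though len
 * alone is only two pages. The macro counts every page the range
 * touches, not just len >> PAGE_SHIFT.
 */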
/*
 * vmbus_setevent - Trigger an event notification on the specified
 * channel.
 */
static void vmbus_setevent(struct vmbus_channel *channel)
{
        struct hv_monitor_page *monitorpage;

        if (channel->offermsg.monitor_allocated) {
                /* Each u32 represents 32 channels */
                sync_set_bit(channel->offermsg.child_relid & 31,
                        (unsigned long *) vmbus_connection.send_int_page +
                        (channel->offermsg.child_relid >> 5));

                /* Get the child to parent monitor page */
                monitorpage = vmbus_connection.monitor_pages[1];

                sync_set_bit(channel->monitor_bit,
                        (unsigned long *)&monitorpage->trigger_group
                                        [channel->monitor_grp].pending);
        } else {
                vmbus_set_event(channel);
        }
}
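/*
 * Worked example (editor's illustration, not in the original file):
 * for child_relid == 37, the interrupt bit lands in u32 word
 * 37 >> 5 == 1 of the send_int_page, at bit 37 & 31 == 5; each relid
 * thus maps to exactly one bit in the shared interrupt page.
 */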
/*
 * vmbus_open - Open the specified channel.
 */
int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
               u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
               void (*onchannelcallback)(void *context), void *context)
{
        struct vmbus_channel_open_channel *open_msg;
        struct vmbus_channel_msginfo *open_info = NULL;
        void *in, *out;
        unsigned long flags;
        int ret, err = 0;
        unsigned long t;
        struct page *page;

        spin_lock_irqsave(&newchannel->lock, flags);
        if (newchannel->state == CHANNEL_OPEN_STATE) {
                newchannel->state = CHANNEL_OPENING_STATE;
        } else {
                spin_unlock_irqrestore(&newchannel->lock, flags);
                return -EINVAL;
        }
        spin_unlock_irqrestore(&newchannel->lock, flags);

        newchannel->onchannel_callback = onchannelcallback;
        newchannel->channel_callback_context = context;

        /* Allocate the ring buffer */
        page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
                                GFP_KERNEL|__GFP_ZERO,
                                get_order(send_ringbuffer_size +
                                recv_ringbuffer_size));
        if (!page)
                out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
                                               get_order(send_ringbuffer_size +
                                               recv_ringbuffer_size));
        else
                out = (void *)page_address(page);

        if (!out) {
                err = -ENOMEM;
                goto error0;
        }

        in = (void *)((unsigned long)out + send_ringbuffer_size);

        newchannel->ringbuffer_pages = out;
        newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
                                           recv_ringbuffer_size) >> PAGE_SHIFT;

        ret = hv_ringbuffer_init(
                &newchannel->outbound, out, send_ringbuffer_size);
        if (ret != 0) {
                err = ret;
                goto error0;
        }

        ret = hv_ringbuffer_init(
                &newchannel->inbound, in, recv_ringbuffer_size);
        if (ret != 0) {
                err = ret;
                goto error0;
        }

        /* Establish the gpadl for the ring buffer */
        newchannel->ringbuffer_gpadlhandle = 0;

        ret = vmbus_establish_gpadl(newchannel,
                                    newchannel->outbound.ring_buffer,
                                    send_ringbuffer_size +
                                    recv_ringbuffer_size,
                                    &newchannel->ringbuffer_gpadlhandle);
        if (ret != 0) {
                err = ret;
                goto error0;
        }

        /* Create and init the channel open message */
        open_info = kmalloc(sizeof(*open_info) +
                            sizeof(struct vmbus_channel_open_channel),
                            GFP_KERNEL);
        if (!open_info) {
                err = -ENOMEM;
                goto error_gpadl;
        }

        init_completion(&open_info->waitevent);

        open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
        open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
        open_msg->openid = newchannel->offermsg.child_relid;
        open_msg->child_relid = newchannel->offermsg.child_relid;
        open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
        open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
                                                     PAGE_SHIFT;
        open_msg->target_vp = newchannel->target_vp;

        if (userdatalen > MAX_USER_DEFINED_BYTES) {
                err = -EINVAL;
                goto error_gpadl;
        }

        if (userdatalen)
                memcpy(open_msg->userdata, userdata, userdatalen);

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&open_info->msglistentry,
                      &vmbus_connection.chn_msg_list);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        ret = vmbus_post_msg(open_msg,
                             sizeof(struct vmbus_channel_open_channel));
        if (ret != 0) {
                err = ret;
                goto error1;
        }

        t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
        if (t == 0) {
                err = -ETIMEDOUT;
                goto error1;
        }

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&open_info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        if (open_info->response.open_result.status) {
                err = -EAGAIN;
                goto error_gpadl;
        }

        newchannel->state = CHANNEL_OPENED_STATE;
        kfree(open_info);
        return 0;

error1:
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&open_info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

error_gpadl:
        vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);

error0:
        free_pages((unsigned long)out,
                   get_order(send_ringbuffer_size + recv_ringbuffer_size));
        kfree(open_info);
        newchannel->state = CHANNEL_OPEN_STATE;
        return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);
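/*
 * Usage sketch (editor's illustration; my_ringbuffer_size and
 * my_callback are hypothetical names, not part of this file):
 *
 *      ret = vmbus_open(device->channel, my_ringbuffer_size,
 *                       my_ringbuffer_size, NULL, 0,
 *                       my_callback, device->channel);
 *      if (ret)
 *              return ret;
 *
 * A VSC typically passes its channel as the callback context so the
 * callback can call vmbus_recvpacket() on it.
 */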
/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 */
static int create_gpadl_header(void *kbuffer, u32 size,
                               struct vmbus_channel_msginfo **msginfo,
                               u32 *messagecount)
{
        int i;
        int pagecount;
        struct vmbus_channel_gpadl_header *gpadl_header;
        struct vmbus_channel_gpadl_body *gpadl_body;
        struct vmbus_channel_msginfo *msgheader;
        struct vmbus_channel_msginfo *msgbody = NULL;
        u32 msgsize;
        int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

        pagecount = size >> PAGE_SHIFT;

        /* do we need a gpadl body msg */
        pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
                  sizeof(struct vmbus_channel_gpadl_header) -
                  sizeof(struct gpa_range);
        pfncount = pfnsize / sizeof(u64);

        if (pagecount > pfncount) {
                /* we need a gpadl body */
                /* fill in the header */
                msgsize = sizeof(struct vmbus_channel_msginfo) +
                          sizeof(struct vmbus_channel_gpadl_header) +
                          sizeof(struct gpa_range) + pfncount * sizeof(u64);
                msgheader = kzalloc(msgsize, GFP_KERNEL);
                if (msgheader == NULL)
                        goto nomem;

                INIT_LIST_HEAD(&msgheader->submsglist);
                msgheader->msgsize = msgsize;

                gpadl_header = (struct vmbus_channel_gpadl_header *)
                        msgheader->msg;
                gpadl_header->rangecount = 1;
                gpadl_header->range_buflen = sizeof(struct gpa_range) +
                                             pagecount * sizeof(u64);
                gpadl_header->range[0].byte_offset = 0;
                gpadl_header->range[0].byte_count = size;
                for (i = 0; i < pfncount; i++)
                        gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
                                kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
                *msginfo = msgheader;
                *messagecount = 1;

                pfnsum = pfncount;
                pfnleft = pagecount - pfncount;

                /* how many pfns can we fit in a body message */
                pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
                          sizeof(struct vmbus_channel_gpadl_body);
                pfncount = pfnsize / sizeof(u64);

                /* fill in the body messages */
                while (pfnleft) {
                        if (pfnleft > pfncount)
                                pfncurr = pfncount;
                        else
                                pfncurr = pfnleft;

                        msgsize = sizeof(struct vmbus_channel_msginfo) +
                                  sizeof(struct vmbus_channel_gpadl_body) +
                                  pfncurr * sizeof(u64);
                        msgbody = kzalloc(msgsize, GFP_KERNEL);

                        if (!msgbody) {
                                struct vmbus_channel_msginfo *pos = NULL;
                                struct vmbus_channel_msginfo *tmp = NULL;
                                /*
                                 * Free up all the allocated messages.
                                 */
                                list_for_each_entry_safe(pos, tmp,
                                        &msgheader->submsglist,
                                        msglistentry) {

                                        list_del(&pos->msglistentry);
                                        kfree(pos);
                                }

                                goto nomem;
                        }

                        msgbody->msgsize = msgsize;
                        (*messagecount)++;
                        gpadl_body =
                                (struct vmbus_channel_gpadl_body *)msgbody->msg;

                        /*
                         * Gpadl is u32 and we are using a pointer which could
                         * be 64-bit.
                         * This is governed by the guest/host protocol and
                         * so the hypervisor guarantees that this is ok.
                         */
                        for (i = 0; i < pfncurr; i++)
                                gpadl_body->pfn[i] = slow_virt_to_phys(
                                        kbuffer + PAGE_SIZE * (pfnsum + i)) >>
                                        PAGE_SHIFT;

                        /* add to msg header */
                        list_add_tail(&msgbody->msglistentry,
                                      &msgheader->submsglist);
                        pfnsum += pfncurr;
                        pfnleft -= pfncurr;
                }
        } else {
                /* everything fits in a header */
                msgsize = sizeof(struct vmbus_channel_msginfo) +
                          sizeof(struct vmbus_channel_gpadl_header) +
                          sizeof(struct gpa_range) + pagecount * sizeof(u64);
                msgheader = kzalloc(msgsize, GFP_KERNEL);
                if (msgheader == NULL)
                        goto nomem;
                msgheader->msgsize = msgsize;

                gpadl_header = (struct vmbus_channel_gpadl_header *)
                        msgheader->msg;
                gpadl_header->rangecount = 1;
                gpadl_header->range_buflen = sizeof(struct gpa_range) +
                                             pagecount * sizeof(u64);
                gpadl_header->range[0].byte_offset = 0;
                gpadl_header->range[0].byte_count = size;
                for (i = 0; i < pagecount; i++)
                        gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
                                kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;

                *msginfo = msgheader;
                *messagecount = 1;
        }

        return 0;
nomem:
        kfree(msgheader);
        kfree(msgbody);
        return -ENOMEM;
}
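/*
 * Sizing example (editor's illustration; the exact value of
 * MAX_SIZE_CHANNEL_MESSAGE comes from hyperv_vmbus.h): each PFN costs
 * 8 bytes, so a header message carries roughly
 * (MAX_SIZE_CHANNEL_MESSAGE - header - gpa_range) / 8 PFNs. A GPADL
 * for a 4 MiB ring buffer spans 1024 pages and therefore needs the
 * GPADL_HEADER message plus several GPADL_BODY messages, all sharing
 * one gpadl handle.
 */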
/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 *
 * @channel: a channel
 * @kbuffer: from kmalloc or vmalloc
 * @size: page-size multiple
 * @gpadl_handle: the handle of the resulting GPADL, returned on success
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
                          u32 size, u32 *gpadl_handle)
{
        struct vmbus_channel_gpadl_header *gpadlmsg;
        struct vmbus_channel_gpadl_body *gpadl_body;
        struct vmbus_channel_msginfo *msginfo = NULL;
        struct vmbus_channel_msginfo *submsginfo;
        u32 msgcount;
        struct list_head *curr;
        u32 next_gpadl_handle;
        unsigned long flags;
        int ret = 0;

        next_gpadl_handle =
                (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);

        ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
        if (ret)
                return ret;

        init_completion(&msginfo->waitevent);

        gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
        gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
        gpadlmsg->child_relid = channel->offermsg.child_relid;
        gpadlmsg->gpadl = next_gpadl_handle;

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&msginfo->msglistentry,
                      &vmbus_connection.chn_msg_list);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
                             sizeof(*msginfo));
        if (ret != 0)
                goto cleanup;

        if (msgcount > 1) {
                list_for_each(curr, &msginfo->submsglist) {

                        submsginfo = (struct vmbus_channel_msginfo *)curr;
                        gpadl_body =
                            (struct vmbus_channel_gpadl_body *)submsginfo->msg;

                        gpadl_body->header.msgtype =
                                CHANNELMSG_GPADL_BODY;
                        gpadl_body->gpadl = next_gpadl_handle;

                        ret = vmbus_post_msg(gpadl_body,
                                             submsginfo->msgsize -
                                             sizeof(*submsginfo));
                        if (ret != 0)
                                goto cleanup;
                }
        }
        wait_for_completion(&msginfo->waitevent);

        /* At this point, we received the gpadl created msg */
        *gpadl_handle = gpadlmsg->gpadl;

cleanup:
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&msginfo->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        kfree(msginfo);
        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
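/*
 * Usage sketch (editor's illustration): vmbus_open() above establishes
 * the GPADL for its ring buffer exactly this way:
 *
 *      u32 gpadl = 0;
 *      ret = vmbus_establish_gpadl(channel, ring_buffer,
 *                                  ring_size, &gpadl);
 *
 * The returned handle is later passed to vmbus_teardown_gpadl().
 */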
/*
 * vmbus_teardown_gpadl - Teardown the specified GPADL handle
 */
int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
{
        struct vmbus_channel_gpadl_teardown *msg;
        struct vmbus_channel_msginfo *info;
        unsigned long flags;
        int ret;

        info = kmalloc(sizeof(*info) +
                       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        init_completion(&info->waitevent);

        msg = (struct vmbus_channel_gpadl_teardown *)info->msg;

        msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
        msg->child_relid = channel->offermsg.child_relid;
        msg->gpadl = gpadl_handle;

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&info->msglistentry,
                      &vmbus_connection.chn_msg_list);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
        ret = vmbus_post_msg(msg,
                             sizeof(struct vmbus_channel_gpadl_teardown));
        if (ret)
                goto post_msg_err;

        wait_for_completion(&info->waitevent);

post_msg_err:
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        kfree(info);
        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
static void reset_channel_cb(void *arg)
{
        struct vmbus_channel *channel = arg;

        channel->onchannel_callback = NULL;
}
static int vmbus_close_internal(struct vmbus_channel *channel)
{
        struct vmbus_channel_close_channel *msg;
        struct tasklet_struct *tasklet;
        int ret;

        /*
         * process_chn_event(), running in the tasklet, can race
         * with vmbus_close_internal() in the case of SMP guest, e.g., when
         * the former is accessing channel->inbound.ring_buffer, the latter
         * could be freeing the ring_buffer pages.
         *
         * To resolve the race, we can serialize them by disabling the
         * tasklet when the latter is running here.
         */
        tasklet = hv_context.event_dpc[channel->target_cpu];
        tasklet_disable(tasklet);

        channel->state = CHANNEL_OPEN_STATE;
        channel->sc_creation_callback = NULL;
        /* Stop the callback asap */
        if (channel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(channel->target_cpu, reset_channel_cb,
                                         channel, true);
        } else {
                reset_channel_cb(channel);
                put_cpu();
        }

        /* Send a closing message */
        msg = &channel->close_msg.msg;

        msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
        msg->child_relid = channel->offermsg.child_relid;

        ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));
        if (ret) {
                pr_err("Close failed: close post msg return is %d\n", ret);
                /*
                 * If we failed to post the close msg,
                 * it is perhaps better to leak memory.
                 */
                goto out;
        }

        /* Tear down the gpadl for the channel's ring buffer */
        if (channel->ringbuffer_gpadlhandle) {
                ret = vmbus_teardown_gpadl(channel,
                                           channel->ringbuffer_gpadlhandle);
                if (ret) {
                        pr_err("Close failed: teardown gpadl return %d\n", ret);
                        /*
                         * If we failed to teardown gpadl,
                         * it is perhaps better to leak memory.
                         */
                        goto out;
                }
        }

        /* Cleanup the ring buffers for this channel */
        hv_ringbuffer_cleanup(&channel->outbound);
        hv_ringbuffer_cleanup(&channel->inbound);

        free_pages((unsigned long)channel->ringbuffer_pages,
                   get_order(channel->ringbuffer_pagecount * PAGE_SIZE));

out:
        tasklet_enable(tasklet);

        return ret;
}
/*
 * vmbus_close - Close the specified channel
 */
void vmbus_close(struct vmbus_channel *channel)
{
        struct list_head *cur, *tmp;
        struct vmbus_channel *cur_channel;

        if (channel->primary_channel != NULL) {
                /*
                 * We will only close sub-channels when
                 * the primary is closed.
                 */
                return;
        }
        /*
         * Close all the sub-channels first and then close the
         * primary channel.
         */
        list_for_each_safe(cur, tmp, &channel->sc_list) {
                cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
                if (cur_channel->state != CHANNEL_OPENED_STATE)
                        continue;
                vmbus_close_internal(cur_channel);
        }
        /*
         * Now close the primary.
         */
        vmbus_close_internal(channel);
}
EXPORT_SYMBOL_GPL(vmbus_close);
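/*
 * Usage sketch (editor's illustration; my_remove() is a hypothetical
 * driver callback, not part of this file):
 *
 *      static int my_remove(struct hv_device *dev)
 *      {
 *              vmbus_close(dev->channel);
 *              ...
 *      }
 *
 * Passing a sub-channel is a no-op; only closing the primary channel
 * tears down the whole set.
 */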
int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
                         u32 bufferlen, u64 requestid,
                         enum vmbus_packet_type type, u32 flags, bool kick_q)
{
        struct vmpacket_descriptor desc;
        u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
        u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        int ret;
        bool signal = false;
        int num_vecs = ((bufferlen != 0) ? 3 : 1);

        /* Setup the descriptor */
        desc.type = type; /* VmbusPacketTypeDataInBand; */
        desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
        /* in 8-bytes granularity */
        desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
        desc.len8 = (u16)(packetlen_aligned >> 3);
        desc.trans_id = requestid;

        bufferlist[0].iov_base = &desc;
        bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
                                  &signal);

        /*
         * Signalling the host is conditional on many factors:
         * 1. The ring state changed from being empty to non-empty.
         *    This is tracked by the variable "signal".
         * 2. The variable kick_q tracks if more data will be placed
         *    on the ring. We will not signal if more data is
         *    to be placed.
         *
         * Based on the channel signal state, we will decide
         * which signaling policy will be applied.
         *
         * If we cannot write to the ring-buffer; signal the host
         * even if we may not have written anything. This is a rare
         * enough condition that it should not matter.
         */
        if (channel->signal_policy)
                signal = true;
        else
                kick_q = true;

        if (((ret == 0) && kick_q && signal) || (ret))
                vmbus_setevent(channel);

        return ret;
}
EXPORT_SYMBOL(vmbus_sendpacket_ctl);
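/*
 * Decision table for the signaling logic above (editor's summary of
 * the code, not from the original file):
 *
 *      write ok (ret == 0), signal set, kick_q set  -> signal host
 *      write ok, signal clear or kick_q false       -> no signal
 *      write failed (ret != 0)                      -> signal host anyway
 *
 * With an explicit signal_policy, "signal" is forced true; otherwise
 * kick_q is forced true, so exactly one of the two overrides applies.
 */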
/**
 * vmbus_sendpacket() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer containing the data to send.
 * @bufferlen: Length of the data in the buffer.
 * @requestid: Identifier of the request
 * @type: Type of packet that is being sent e.g. negotiate, time
 * packet etc.
 *
 * Sends data in @buffer directly to hyper-v via the vmbus.
 * This will send the data unparsed to hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
                     u32 bufferlen, u64 requestid,
                     enum vmbus_packet_type type, u32 flags)
{
        return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid,
                                    type, flags, true);
}
EXPORT_SYMBOL(vmbus_sendpacket);
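/*
 * Usage sketch (editor's illustration; my_request is a hypothetical
 * driver structure, not part of this file):
 *
 *      ret = vmbus_sendpacket(channel, &my_request, sizeof(my_request),
 *                             (unsigned long)&my_request,
 *                             VM_PKT_DATA_INBAND,
 *                             VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 *
 * The requestid (here the request's address) comes back as trans_id in
 * the host's completion packet, letting the driver match responses.
 */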
/*
 * vmbus_sendpacket_pagebuffer_ctl - Send a range of single-page buffer
 * packets using a GPADL Direct packet type. This interface allows you
 * to control notifying the host. This will be useful for sending
 * batched data. Also the sender can control the send flags
 * explicitly.
 */
int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
                                    struct hv_page_buffer pagebuffers[],
                                    u32 pagecount, void *buffer, u32 bufferlen,
                                    u64 requestid,
                                    u32 flags,
                                    bool kick_q)
{
        int ret;
        int i;
        struct vmbus_channel_packet_page_buffer desc;
        u32 descsize;
        u32 packetlen;
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        bool signal = false;

        if (pagecount > MAX_PAGE_BUFFER_COUNT)
                return -EINVAL;

        /*
         * Adjust the size down since vmbus_channel_packet_page_buffer is the
         * largest size we support
         */
        descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
                   ((MAX_PAGE_BUFFER_COUNT - pagecount) *
                   sizeof(struct hv_page_buffer));
        packetlen = descsize + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));

        /* Setup the descriptor */
        desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
        desc.flags = flags;
        desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
        desc.length8 = (u16)(packetlen_aligned >> 3);
        desc.transactionid = requestid;
        desc.rangecount = pagecount;

        for (i = 0; i < pagecount; i++) {
                desc.range[i].len = pagebuffers[i].len;
                desc.range[i].offset = pagebuffers[i].offset;
                desc.range[i].pfn = pagebuffers[i].pfn;
        }

        bufferlist[0].iov_base = &desc;
        bufferlist[0].iov_len = descsize;
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);

        /*
         * Signalling the host is conditional on many factors:
         * 1. The ring state changed from being empty to non-empty.
         *    This is tracked by the variable "signal".
         * 2. The variable kick_q tracks if more data will be placed
         *    on the ring. We will not signal if more data is
         *    to be placed.
         *
         * Based on the channel signal state, we will decide
         * which signaling policy will be applied.
         *
         * If we cannot write to the ring-buffer; signal the host
         * even if we may not have written anything. This is a rare
         * enough condition that it should not matter.
         */
        if (channel->signal_policy)
                signal = true;
        else
                kick_q = true;

        if (((ret == 0) && kick_q && signal) || (ret))
                vmbus_setevent(channel);

        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
/*
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
 * packets using a GPADL Direct packet type.
 */
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
                                struct hv_page_buffer pagebuffers[],
                                u32 pagecount, void *buffer, u32 bufferlen,
                                u64 requestid)
{
        u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;

        return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount,
                                               buffer, bufferlen, requestid,
                                               flags, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
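/*
 * Usage sketch (editor's illustration; pb, data and len are
 * hypothetical):
 *
 *      struct hv_page_buffer pb;
 *
 *      pb.pfn = virt_to_phys(data) >> PAGE_SHIFT;
 *      pb.offset = offset_in_page(data);
 *      pb.len = len;
 *      ret = vmbus_sendpacket_pagebuffer(channel, &pb, 1,
 *                                        &hdr, sizeof(hdr), req_id);
 *
 * Each hv_page_buffer describes one page-bounded range that the host
 * reads directly, so data must not cross a page boundary.
 */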
/*
 * vmbus_sendpacket_mpb_desc - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 * The buffer includes the vmbus descriptor.
 */
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
                              struct vmbus_packet_mpb_array *desc,
                              u32 desc_size,
                              void *buffer, u32 bufferlen, u64 requestid)
{
        int ret;
        u32 packetlen;
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        bool signal = false;

        packetlen = desc_size + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));

        /* Setup the descriptor */
        desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
        desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
        desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
        desc->length8 = (u16)(packetlen_aligned >> 3);
        desc->transactionid = requestid;
        desc->rangecount = 1;

        bufferlist[0].iov_base = desc;
        bufferlist[0].iov_len = desc_size;
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);

        if (ret == 0 && signal)
                vmbus_setevent(channel);

        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
/*
 * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 */
int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
                                     struct hv_multipage_buffer *multi_pagebuffer,
                                     void *buffer, u32 bufferlen, u64 requestid)
{
        int ret;
        struct vmbus_channel_packet_multipage_buffer desc;
        u32 descsize;
        u32 packetlen;
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        bool signal = false;
        u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
                                         multi_pagebuffer->len);

        if (pfncount > MAX_MULTIPAGE_BUFFER_COUNT)
                return -EINVAL;

        /*
         * Adjust the size down since vmbus_channel_packet_multipage_buffer is
         * the largest size we support
         */
        descsize = sizeof(struct vmbus_channel_packet_multipage_buffer) -
                   ((MAX_MULTIPAGE_BUFFER_COUNT - pfncount) *
                   sizeof(u64));
        packetlen = descsize + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));

        /* Setup the descriptor */
        desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
        desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
        desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
        desc.length8 = (u16)(packetlen_aligned >> 3);
        desc.transactionid = requestid;
        desc.rangecount = 1;

        desc.range.len = multi_pagebuffer->len;
        desc.range.offset = multi_pagebuffer->offset;

        memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
               pfncount * sizeof(u64));

        bufferlist[0].iov_base = &desc;
        bufferlist[0].iov_len = descsize;
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);

        if (ret == 0 && signal)
                vmbus_setevent(channel);

        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
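/*
 * Example (editor's illustration): a 10 KiB range starting at offset
 * 0x800 spans NUM_PAGES_SPANNED(0x800, 0x2800) = 3 pages, so pfncount
 * here would be 3 and the descriptor is shrunk by
 * (MAX_MULTIPAGE_BUFFER_COUNT - 3) unused pfn_array slots.
 */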
/**
 * vmbus_recvpacket() - Retrieve the user packet on the specified channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer you want to receive the data into.
 * @bufferlen: Maximum size of what the buffer will hold
 * @buffer_actual_len: The actual size of the data after it was received
 * @requestid: Identifier of the request
 *
 * Receives directly from the hyper-v vmbus and puts the data it received
 * into @buffer. This will receive the data unparsed from hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
                     u32 bufferlen, u32 *buffer_actual_len, u64 *requestid)
{
        struct vmpacket_descriptor desc;
        u32 packetlen;
        u32 userlen;
        int ret;
        bool signal = false;

        *buffer_actual_len = 0;
        *requestid = 0;

        ret = hv_ringbuffer_peek(&channel->inbound, &desc,
                                 sizeof(struct vmpacket_descriptor));
        if (ret != 0)
                return 0;

        packetlen = desc.len8 << 3;
        userlen = packetlen - (desc.offset8 << 3);

        *buffer_actual_len = userlen;

        if (userlen > bufferlen) {
                pr_err("Buffer too small - got %d needs %d\n",
                       bufferlen, userlen);
                return -ETOOSMALL;
        }

        *requestid = desc.trans_id;

        /* Copy over the packet to the user buffer */
        ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen,
                                 (desc.offset8 << 3), &signal);

        if (signal)
                vmbus_setevent(channel);

        return ret;
}
EXPORT_SYMBOL(vmbus_recvpacket);
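/*
 * Usage sketch (editor's illustration; my_onchannelcallback, buf and
 * handle_packet are hypothetical): a channel callback typically drains
 * the ring like so:
 *
 *      static void my_onchannelcallback(void *context)
 *      {
 *              struct vmbus_channel *channel = context;
 *              u32 recvlen;
 *              u64 requestid;
 *
 *              while (vmbus_recvpacket(channel, buf, sizeof(buf),
 *                                      &recvlen, &requestid) == 0 &&
 *                     recvlen > 0)
 *                      handle_packet(buf, recvlen, requestid);
 *      }
 */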
/*
 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
 */
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
                         u32 bufferlen, u32 *buffer_actual_len,
                         u64 *requestid)
{
        struct vmpacket_descriptor desc;
        u32 packetlen;
        int ret;
        bool signal = false;

        *buffer_actual_len = 0;
        *requestid = 0;

        ret = hv_ringbuffer_peek(&channel->inbound, &desc,
                                 sizeof(struct vmpacket_descriptor));
        if (ret != 0)
                return 0;

        packetlen = desc.len8 << 3;

        *buffer_actual_len = packetlen;

        if (packetlen > bufferlen)
                return -ENOBUFS;

        *requestid = desc.trans_id;

        /* Copy over the entire packet to the user buffer */
        ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0,
                                 &signal);

        if (signal)
                vmbus_setevent(channel);

        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);