/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

/*
 * This is the maximum number of slots a skb can use. If a guest sends
 * a skb which exceeds this limit it is considered malicious.
 */
#define MAX_SKB_SLOTS_DEFAULT 20
static unsigned int max_skb_slots = MAX_SKB_SLOTS_DEFAULT;
module_param(max_skb_slots, uint, 0444);

typedef unsigned int pending_ring_idx_t;
#define INVALID_PENDING_RING_IDX (~0U)

struct pending_tx_info {
	struct xen_netif_tx_request req; /* coalesced tx request */
	struct xenvif *vif; /* VIF pending_tx_info is pending on */
	pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
				  * if it is head of one or more tx
				  * reqs
				  */
};

struct netbk_rx_meta {
	int id;
	int size;
	int gso_size;
};

#define MAX_PENDING_REQS 256

/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF

#define MAX_BUFFER_OFFSET PAGE_SIZE

/* extra field used in struct page */
union page_ext {
	struct {
#if BITS_PER_LONG < 64
#define IDX_WIDTH   8
#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
		unsigned int group:GROUP_WIDTH;
		unsigned int idx:IDX_WIDTH;
#else
		unsigned int group, idx;
#endif
	} e;
	void *mapping;
};

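/*
 * Note on the encoding: on a 32-bit build the group and idx bitfields
 * together fill the pointer-sized mapping field (idx gets 8 bits, which is
 * exactly enough for MAX_PENDING_REQS == 256 entries, and group the rest).
 * set_page_ext() below stores group biased by one, so a page whose mapping
 * is NULL can never be mistaken for a valid group-0 entry by get_page_ext().
 */
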
struct xen_netbk {
	wait_queue_head_t wq;
	struct task_struct *task;

	struct sk_buff_head rx_queue;
	struct sk_buff_head tx_queue;

	struct timer_list net_timer;

	struct page *mmap_pages[MAX_PENDING_REQS];

	pending_ring_idx_t pending_prod;
	pending_ring_idx_t pending_cons;
	struct list_head net_schedule_list;

	/* Protect the net_schedule_list in netif. */
	spinlock_t net_schedule_list_lock;

	atomic_t netfront_count;

	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
	/* Coalescing tx requests before copying makes the number of grant
	 * copy ops greater than or equal to the number of slots required.
	 * In the worst case a tx request consumes 2 gnttab_copy ops.
	 */
	struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];

	u16 pending_ring[MAX_PENDING_REQS];

	/*
	 * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
	 * head/fragment page uses 2 copy operations because it
	 * straddles two buffers in the frontend.
	 */
	struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
	struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
};

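/*
 * Sizing note: with MAX_PENDING_REQS == 256 the tx_copy_ops array holds 512
 * entries, which covers the worst case of every pending tx request
 * straddling a destination page boundary and therefore needing two copies.
 * Symmetrically, on the RX side each head/fragment page may straddle two
 * frontend buffers, hence the 2*XEN_NETIF_RX_RING_SIZE sizing of
 * grant_copy_op and meta.
 */
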
static struct xen_netbk *xen_netbk;
static int xen_netbk_group_nr;

/*
 * If head != INVALID_PENDING_RING_IDX, it means this tx request is the head
 * of one or more merged tx requests, otherwise it is the continuation of
 * the previous tx request.
 */
static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
{
	return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
}

void xen_netbk_add_xenvif(struct xenvif *vif)
{
	int i;
	int min_netfront_count;
	int min_group = 0;
	struct xen_netbk *netbk;

	min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
	for (i = 0; i < xen_netbk_group_nr; i++) {
		int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
		if (netfront_count < min_netfront_count) {
			min_group = i;
			min_netfront_count = netfront_count;
		}
	}

	netbk = &xen_netbk[min_group];

	vif->netbk = netbk;
	atomic_inc(&netbk->netfront_count);
}

void xen_netbk_remove_xenvif(struct xenvif *vif)
{
	struct xen_netbk *netbk = vif->netbk;
	vif->netbk = NULL;
	atomic_dec(&netbk->netfront_count);
}

static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
				  u8 status);
static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8       st);
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
					     u16      id,
					     s8       st,
					     u16      offset,
					     u16      size,
					     u16      flags);

static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
				       u16 idx)
{
	return page_to_pfn(netbk->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
}

/* extra field used in struct page */
static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
				unsigned int idx)
{
	unsigned int group = netbk - xen_netbk;
	union page_ext ext = { .e = { .group = group + 1, .idx = idx } };

	BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
	pg->mapping = ext.mapping;
}

static int get_page_ext(struct page *pg,
			unsigned int *pgroup, unsigned int *pidx)
{
	union page_ext ext = { .mapping = pg->mapping };
	struct xen_netbk *netbk;
	unsigned int group, idx;

	group = ext.e.group - 1;

	if (group < 0 || group >= xen_netbk_group_nr)
		return 0;

	netbk = &xen_netbk[group];

	idx = ext.e.idx;

	if ((idx < 0) || (idx >= MAX_PENDING_REQS))
		return 0;

	if (netbk->mmap_pages[idx] != pg)
		return 0;

	*pgroup = group;
	*pidx = idx;

	return 1;
}

/*
 * This is the amount of each packet that we copy rather than map, so that
 * the guest can't fiddle with the contents of the headers while we do
 * packet processing on them (netfilter, routing, etc).
 */
#define PKT_PROT_LEN    (ETH_HLEN + \
			 VLAN_HLEN + \
			 sizeof(struct iphdr) + MAX_IPOPTLEN + \
			 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)

static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}

static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}

static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
{
	return MAX_PENDING_REQS -
		netbk->pending_prod + netbk->pending_cons;
}

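/*
 * Note: pending_index() relies on MAX_PENDING_REQS being a power of two;
 * the AND with (MAX_PENDING_REQS-1) is a cheap modulo. pending_prod and
 * pending_cons are free-running counters, so
 * nr_pending_reqs() == MAX_PENDING_REQS - (pending_prod - pending_cons)
 * is the number of slots currently in use: 0 right after init (where
 * pending_prod == MAX_PENDING_REQS and pending_cons == 0), growing as tx
 * requests consume slots and shrinking as responses release them.
 */
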
static void xen_netbk_kick_thread(struct xen_netbk *netbk)
{
	wake_up(&netbk->wq);
}

static int max_required_rx_slots(struct xenvif *vif)
{
	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);

	/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
	if (vif->can_sg || vif->gso || vif->gso_prefix)
		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */

	return max;
}

int xen_netbk_rx_ring_full(struct xenvif *vif)
{
	RING_IDX peek   = vif->rx_req_cons_peek;
	RING_IDX needed = max_required_rx_slots(vif);

	return ((vif->rx.sring->req_prod - peek) < needed) ||
	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
}

int xen_netbk_must_stop_queue(struct xenvif *vif)
{
	if (!xen_netbk_rx_ring_full(vif))
		return 0;

	vif->rx.sring->req_event = vif->rx_req_cons_peek +
		max_required_rx_slots(vif);
	mb(); /* request notification /then/ check the queue */
	return xen_netbk_rx_ring_full(vif);
}

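/*
 * The pattern above closes a wake-up race: req_event is set so the frontend
 * will notify us once enough requests have been posted, a full barrier is
 * issued, and then fullness is re-checked. If the ring drained between the
 * first check and the req_event write, the second check notices and the
 * queue is not stopped needlessly.
 */
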
/*
 * Returns true if we should start a new receive buffer instead of
 * adding 'size' bytes to a buffer which currently contains 'offset'
 * bytes.
 */
static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
	/* simple case: we have completely filled the current buffer. */
	if (offset == MAX_BUFFER_OFFSET)
		return true;

	/*
	 * complex case: start a fresh buffer if the current frag
	 * would overflow the current buffer but only if:
	 *     (i)   this frag would fit completely in the next buffer
	 * and (ii)  there is already some data in the current buffer
	 * and (iii) this is not the head buffer.
	 *
	 * Where:
	 * - (i) stops us splitting a frag into two copies
	 *   unless the frag is too large for a single buffer.
	 * - (ii) stops us from leaving a buffer pointlessly empty.
	 * - (iii) stops us leaving the first buffer
	 *   empty. Strictly speaking this is already covered
	 *   by (ii) but is explicitly checked because
	 *   netfront relies on the first buffer being
	 *   non-empty and can crash otherwise.
	 *
	 * This means we will effectively linearise small
	 * frags but do not needlessly split large buffers
	 * into multiple copies, tending to give large frags
	 * their own buffers as before.
	 */
	if ((offset + size > MAX_BUFFER_OFFSET) &&
	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
		return true;

	return false;
}

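/*
 * Worked example: with MAX_BUFFER_OFFSET == 4096, a 2000-byte frag arriving
 * while the current (non-head) buffer already holds 3000 bytes satisfies all
 * three conditions (3000+2000 > 4096, 2000 <= 4096, offset != 0, !head), so
 * the frag is placed whole at the start of a fresh buffer rather than being
 * split 1096/904 across two buffers.
 */
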
/*
 * Figure out how many ring slots we're going to need to send @skb to
 * the guest. This function is essentially a dry run of
 * netbk_gop_frag_copy.
 */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
	unsigned int count;
	int i, copy_off;

	count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);

	copy_off = skb_headlen(skb) % PAGE_SIZE;

	if (skb_shinfo(skb)->gso_size)
		count++;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
		unsigned long bytes;

		offset &= ~PAGE_MASK;

		while (size > 0) {
			BUG_ON(offset >= PAGE_SIZE);
			BUG_ON(copy_off > MAX_BUFFER_OFFSET);

			bytes = PAGE_SIZE - offset;
			if (bytes > size)
				bytes = size;

			if (start_new_rx_buffer(copy_off, bytes, 0)) {
				count++;
				copy_off = 0;
			}

			if (copy_off + bytes > MAX_BUFFER_OFFSET)
				bytes = MAX_BUFFER_OFFSET - copy_off;

			copy_off += bytes;
			offset += bytes;
			size -= bytes;

			if (offset == PAGE_SIZE)
				offset = 0;
		}
	}
	return count;
}

struct netrx_pending_operations {
	unsigned copy_prod, copy_cons;
	unsigned meta_prod, meta_cons;
	struct gnttab_copy *copy;
	struct netbk_rx_meta *meta;
	int copy_off;
	grant_ref_t copy_gref;
};

static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
						struct netrx_pending_operations *npo)
{
	struct netbk_rx_meta *meta;
	struct xen_netif_rx_request *req;

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	return meta;
}

/*
 * Set up the grant operations for this fragment.
 */
static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
				struct netrx_pending_operations *npo,
				struct page *page, unsigned long size,
				unsigned long offset, int *head)
{
	struct gnttab_copy *copy_gop;
	struct netbk_rx_meta *meta;
	/*
	 * These variables are used iff get_page_ext returns true,
	 * in which case they are guaranteed to be initialized.
	 */
	unsigned int uninitialized_var(group), uninitialized_var(idx);
	int foreign = get_page_ext(page, &group, &idx);
	unsigned long bytes;

	/* Data must not cross a page boundary. */
	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));

	meta = npo->meta + npo->meta_prod - 1;

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (size > 0) {
		BUG_ON(offset >= PAGE_SIZE);
		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

		bytes = PAGE_SIZE - offset;
		if (bytes > size)
			bytes = size;

		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
			/*
			 * Netfront requires there to be some data in the head
			 * buffer.
			 */
			BUG_ON(*head);

			meta = get_next_rx_buffer(vif, npo);
		}

		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - npo->copy_off;

		copy_gop = npo->copy + npo->copy_prod++;
		copy_gop->flags = GNTCOPY_dest_gref;
		if (foreign) {
			struct xen_netbk *netbk = &xen_netbk[group];
			struct pending_tx_info *src_pend;

			src_pend = &netbk->pending_tx_info[idx];

			copy_gop->source.domid = src_pend->vif->domid;
			copy_gop->source.u.ref = src_pend->req.gref;
			copy_gop->flags |= GNTCOPY_source_gref;
		} else {
			void *vaddr = page_address(page);
			copy_gop->source.domid = DOMID_SELF;
			copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
		}
		copy_gop->source.offset = offset;
		copy_gop->dest.domid = vif->domid;

		copy_gop->dest.offset = npo->copy_off;
		copy_gop->dest.u.ref = npo->copy_gref;
		copy_gop->len = bytes;

		npo->copy_off += bytes;
		meta->size += bytes;

		offset += bytes;
		size -= bytes;

		/* Next frame */
		if (offset == PAGE_SIZE && size) {
			BUG_ON(!PageCompound(page));
			page++;
			offset = 0;
		}

		/* Leave a gap for the GSO descriptor. */
		if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
			vif->rx.req_cons++;

		*head = 0; /* There must be something in this buffer now. */
	}
}

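/*
 * Note on the foreign-page case above: a page that carries the page_ext tag
 * holds data that arrived from another guest's TX grant, so the RX copy is
 * sourced directly from that guest's grant reference (GNTCOPY_source_gref)
 * rather than from a local frame; DOMID_SELF with a raw gmfn is only valid
 * for pages this domain owns itself.
 */
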
/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int netbk_gop_skb(struct sk_buff *skb,
			 struct netrx_pending_operations *npo)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct netbk_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;

	old_meta_prod = npo->meta_prod;

	/* Set up a GSO prefix descriptor, if necessary */
	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if (!vif->gso_prefix)
		meta->gso_size = skb_shinfo(skb)->gso_size;
	else
		meta->gso_size = 0;

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		netbk_gop_frag_copy(vif, skb, npo,
				    virt_to_page(data), len, offset, &head);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		netbk_gop_frag_copy(vif, skb, npo,
				    skb_frag_page(&skb_shinfo(skb)->frags[i]),
				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
				    skb_shinfo(skb)->frags[i].page_offset,
				    &head);
	}

	return npo->meta_prod - old_meta_prod;
}

/*
 * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done. Check that
 * they didn't give any errors and advance over them.
 */
static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
			   struct netrx_pending_operations *npo)
{
	struct gnttab_copy *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;

	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		}
	}

	return status;
}

static void netbk_add_frag_responses(struct xenvif *vif, int status,
				     struct netbk_rx_meta *meta,
				     int nr_meta_slots)
{
	int i;
	unsigned long offset;

	/* No fragments used */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;
		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		offset = 0;
		make_rx_response(vif, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
}

struct skb_cb_overlay {
	int meta_slots_used;
};

static void xen_netbk_rx_action(struct xen_netbk *netbk)
{
	struct xenvif *vif = NULL, *tmp;
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	int nr_frags;
	int count;
	unsigned long offset;
	struct skb_cb_overlay *sco;

	struct netrx_pending_operations npo = {
		.copy  = netbk->grant_copy_op,
		.meta  = netbk->meta,
	};

	skb_queue_head_init(&rxq);

	count = 0;

	while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
		vif = netdev_priv(skb->dev);
		nr_frags = skb_shinfo(skb)->nr_frags;

		sco = (struct skb_cb_overlay *)skb->cb;
		sco->meta_slots_used = netbk_gop_skb(skb, &npo);

		count += nr_frags + 1;

		__skb_queue_tail(&rxq, skb);

		/* Filled the batch queue? */
		/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
			break;
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));

	if (!npo.copy_prod)
		return;

	BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
	gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		sco = (struct skb_cb_overlay *)skb->cb;

		vif = netdev_priv(skb->dev);

		if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
			resp = RING_GET_RESPONSE(&vif->rx,
						vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = netbk->meta[npo.meta_cons].gso_size;
			resp->id = netbk->meta[npo.meta_cons].id;
			resp->status = sco->meta_slots_used;

			npo.meta_cons++;
			sco->meta_slots_used--;
		}

		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = netbk_check_gop(vif, sco->meta_slots_used, &npo);

		if (sco->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
					status, offset,
					netbk->meta[npo.meta_cons].size,
					flags);

		if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		netbk_add_frag_responses(vif, status,
					 netbk->meta + npo.meta_cons + 1,
					 sco->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);

		if (ret && list_empty(&vif->notify_list))
			list_add_tail(&vif->notify_list, &notify);

		xenvif_notify_tx_completion(vif);

		xenvif_put(vif);
		npo.meta_cons += sco->meta_slots_used;
	}

	list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
		notify_remote_via_irq(vif->irq);
		list_del_init(&vif->notify_list);
	}

	/* More work to do? */
	if (!skb_queue_empty(&netbk->rx_queue) &&
	    !timer_pending(&netbk->net_timer))
		xen_netbk_kick_thread(netbk);
}

void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
{
	struct xen_netbk *netbk = vif->netbk;

	skb_queue_tail(&netbk->rx_queue, skb);

	xen_netbk_kick_thread(netbk);
}

static void xen_netbk_alarm(unsigned long data)
{
	struct xen_netbk *netbk = (struct xen_netbk *)data;
	xen_netbk_kick_thread(netbk);
}

static int __on_net_schedule_list(struct xenvif *vif)
{
	return !list_empty(&vif->schedule_list);
}

/* Must be called with net_schedule_list_lock held */
static void remove_from_net_schedule_list(struct xenvif *vif)
{
	if (likely(__on_net_schedule_list(vif))) {
		list_del_init(&vif->schedule_list);
		xenvif_put(vif);
	}
}

static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
{
	struct xenvif *vif = NULL;

	spin_lock_irq(&netbk->net_schedule_list_lock);
	if (list_empty(&netbk->net_schedule_list))
		goto out;

	vif = list_first_entry(&netbk->net_schedule_list,
			       struct xenvif, schedule_list);
	if (!vif)
		goto out;

	xenvif_get(vif);

	remove_from_net_schedule_list(vif);
out:
	spin_unlock_irq(&netbk->net_schedule_list_lock);
	return vif;
}

void xen_netbk_schedule_xenvif(struct xenvif *vif)
{
	unsigned long flags;
	struct xen_netbk *netbk = vif->netbk;

	if (__on_net_schedule_list(vif))
		goto kick;

	spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
	if (!__on_net_schedule_list(vif) &&
	    likely(xenvif_schedulable(vif))) {
		list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
		xenvif_get(vif);
	}
	spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);

kick:
	smp_mb();
	if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
	    !list_empty(&netbk->net_schedule_list))
		xen_netbk_kick_thread(netbk);
}

void xen_netbk_deschedule_xenvif(struct xenvif *vif)
{
	struct xen_netbk *netbk = vif->netbk;
	spin_lock_irq(&netbk->net_schedule_list_lock);
	remove_from_net_schedule_list(vif);
	spin_unlock_irq(&netbk->net_schedule_list_lock);
}

void xen_netbk_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		xen_netbk_schedule_xenvif(vif);
}

static void tx_add_credit(struct xenvif *vif)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, vif->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = vif->remaining_credit + vif->credit_bytes;
	if (max_credit < vif->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	vif->remaining_credit = min(max_credit, max_burst);
}

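/*
 * Example: with credit_bytes == 100000 and remaining_credit == 20000, a
 * pending 1500-byte request gives max_burst == max(1500, 100000) == 100000
 * and max_credit == 120000, so remaining_credit becomes
 * min(120000, 100000) == 100000 -- one full replenish interval's worth,
 * never more than a burst.
 */
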
static void tx_credit_callback(unsigned long data)
{
	struct xenvif *vif = (struct xenvif *)data;
	tx_add_credit(vif);
	xen_netbk_check_rx_xenvif(vif);
}

static void netbk_tx_err(struct xenvif *vif,
			 struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = vif->tx.req_cons;

	do {
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		if (cons == end)
			break;
		txp = RING_GET_REQUEST(&vif->tx, cons++);
	} while (1);
	vif->tx.req_cons = cons;
	xen_netbk_check_rx_xenvif(vif);
	xenvif_put(vif);
}

static void netbk_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	xenvif_carrier_off(vif);
	xenvif_put(vif);
}

static int netbk_count_requests(struct xenvif *vif,
				struct xen_netif_tx_request *first,
				RING_IDX first_idx,
				struct xen_netif_tx_request *txp,
				int work_to_do)
{
	RING_IDX cons = vif->tx.req_cons;
	int slots = 0;
	int drop_err = 0;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		if (slots >= work_to_do) {
			netdev_err(vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			netbk_fatal_tx_err(vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and
		 * considered malicious.
		 */
		if (unlikely(slots >= max_skb_slots)) {
			netdev_err(vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, max_skb_slots);
			netbk_fatal_tx_err(vif);
			return -E2BIG;
		}

		/* Xen network protocol had implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETIF_NR_SLOTS_MIN is set to the
		 * historical MAX_SKB_FRAGS value 18 to honor the same
		 * behavior as before. Any packet using more than 18
		 * slots but less than max_skb_slots slots is dropped.
		 */
		if (!drop_err && slots >= XEN_NETIF_NR_SLOTS_MIN) {
			if (net_ratelimit())
				netdev_dbg(vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETIF_NR_SLOTS_MIN);
			drop_err = -E2BIG;
		}

		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
		       sizeof(*txp));
		if (txp->size > first->size) {
			netdev_err(vif->dev,
				   "Invalid tx request, slot size %u > remaining size %u\n",
				   txp->size, first->size);
			netbk_fatal_tx_err(vif);
			return -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
			netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
				   txp->offset, txp->size);
			netbk_fatal_tx_err(vif);
			return -EINVAL;
		}
	} while ((txp++)->flags & XEN_NETTXF_more_data);

	if (drop_err) {
		netbk_tx_err(vif, first, first_idx + slots);
		return drop_err;
	}

	return slots;
}

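/*
 * Summary of the three thresholds checked above: work_to_do bounds a single
 * batch and overrunning it is fatal; max_skb_slots (default 20) is the
 * malice cutoff, also fatal; XEN_NETIF_NR_SLOTS_MIN (the historical
 * MAX_SKB_FRAGS value of 18) merely drops the packet, which keeps
 * compatibility with well-meaning frontends that occasionally exceed it.
 */
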
static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
					 u16 pending_idx)
{
	struct page *page;
	page = alloc_page(GFP_KERNEL|__GFP_COLD);
	if (!page)
		return NULL;
	set_page_ext(page, netbk, pending_idx);
	netbk->mmap_pages[pending_idx] = page;
	return page;
}

static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
						  struct xenvif *vif,
						  struct sk_buff *skb,
						  struct xen_netif_tx_request *txp,
						  struct gnttab_copy *gop)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = *((u16 *)skb->data);
	u16 head_idx = 0;
	int slot, start;
	struct page *page;
	pending_ring_idx_t index, start_idx = 0;
	uint16_t dst_offset;
	unsigned int nr_slots;
	struct pending_tx_info *first = NULL;

	/* At this point shinfo->nr_frags is in fact the number of
	 * slots, which can be as large as XEN_NETIF_NR_SLOTS_MIN.
	 */
	nr_slots = shinfo->nr_frags;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	/* Coalesce tx requests, at this point the packet passed in
	 * should be <= 64K. Any packets larger than 64K have been
	 * handled in netbk_count_requests().
	 */
	for (shinfo->nr_frags = slot = start; slot < nr_slots;
	     shinfo->nr_frags++) {
		struct pending_tx_info *pending_tx_info =
			netbk->pending_tx_info;

		page = alloc_page(GFP_KERNEL|__GFP_COLD);
		if (!page)
			goto err;

		dst_offset = 0;
		first = NULL;
		while (dst_offset < PAGE_SIZE && slot < nr_slots) {
			gop->flags = GNTCOPY_source_gref;

			gop->source.u.ref = txp->gref;
			gop->source.domid = vif->domid;
			gop->source.offset = txp->offset;

			gop->dest.domid = DOMID_SELF;

			gop->dest.offset = dst_offset;
			gop->dest.u.gmfn = virt_to_mfn(page_address(page));

			if (dst_offset + txp->size > PAGE_SIZE) {
				/* This page can only merge a portion
				 * of the tx request. Do not increment
				 * any pointer / counter here. The txp
				 * will be dealt with in future
				 * rounds, eventually hitting the
				 * `else` branch.
				 */
				gop->len = PAGE_SIZE - dst_offset;
				txp->offset += gop->len;
				txp->size -= gop->len;
				dst_offset += gop->len; /* quit loop */
			} else {
				/* This tx request can be merged in the page */
				gop->len = txp->size;
				dst_offset += gop->len;

				index = pending_index(netbk->pending_cons++);

				pending_idx = netbk->pending_ring[index];

				memcpy(&pending_tx_info[pending_idx].req, txp,
				       sizeof(*txp));
				xenvif_get(vif);

				pending_tx_info[pending_idx].vif = vif;

				/* Poison these fields, corresponding
				 * fields for head tx req will be set
				 * to correct values after the loop.
				 */
				netbk->mmap_pages[pending_idx] = (void *)(~0UL);
				pending_tx_info[pending_idx].head =
					INVALID_PENDING_RING_IDX;

				if (!first) {
					first = &pending_tx_info[pending_idx];
					start_idx = index;
					head_idx = pending_idx;
				}

				txp++;
				slot++;
			}

			gop++;
		}

		first->req.offset = 0;
		first->req.size = dst_offset;
		first->head = start_idx;
		set_page_ext(page, netbk, head_idx);
		netbk->mmap_pages[head_idx] = page;
		frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
	}

	BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);

	return gop;
err:
	/* Unwind, freeing all pages and sending error responses. */
	while (shinfo->nr_frags-- > start) {
		xen_netbk_idx_release(netbk,
				frag_get_pending_idx(&frags[shinfo->nr_frags]),
				XEN_NETIF_RSP_ERROR);
	}
	/* The head too, if necessary. */
	if (start)
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

	return NULL;
}

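/*
 * Coalescing example: three 1000-byte slots followed by a 1500-byte slot
 * are packed into one 4096-byte backing page as 1000+1000+1000+1096; the
 * remaining 404 bytes of the last slot spill into the next page on the
 * following round. Only the head slot's pending_tx_info ends up describing
 * the page (offset 0, size == bytes packed, head == ring index); the merged
 * continuation slots keep head == INVALID_PENDING_RING_IDX.
 */
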
static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
				  struct sk_buff *skb,
				  struct gnttab_copy **gopp)
{
	struct gnttab_copy *gop = *gopp;
	u16 pending_idx = *((u16 *)skb->data);
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct pending_tx_info *tx_info;
	int nr_frags = shinfo->nr_frags;
	int i, err, start;
	u16 peek; /* peek into next tx request */

	/* Check status of header. */
	err = gop->status;
	if (unlikely(err))
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (i = start; i < nr_frags; i++) {
		int j, newerr;
		pending_ring_idx_t head;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
		tx_info = &netbk->pending_tx_info[pending_idx];
		head = tx_info->head;

		/* Check error status: if okay then remember grant handle. */
		do {
			newerr = (++gop)->status;
			if (newerr)
				break;
			peek = netbk->pending_ring[pending_index(++head)];
		} while (!pending_tx_is_head(netbk, peek));

		if (likely(!newerr)) {
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err))
				xen_netbk_idx_release(netbk, pending_idx,
						      XEN_NETIF_RSP_OKAY);
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: invalidate header and preceding fragments. */
		pending_idx = *((u16 *)skb->data);
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
		for (j = start; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xen_netbk_idx_release(netbk, pending_idx,
					      XEN_NETIF_RSP_OKAY);
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	*gopp = gop + 1;
	return err;
}

static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		txp = &netbk->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset xen_netbk_idx_release */
		get_page(netbk->mmap_pages[pending_idx]);
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
	}
}

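/*
 * The get_page()/idx_release() pairing above is deliberate: releasing the
 * pending slot drops the reference that the pending ring held on the
 * backing page, so an extra reference is taken first to keep the page alive
 * for as long as the skb's frag points at it. That reference is dropped
 * when the skb itself is freed.
 */
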
static int xen_netbk_get_extras(struct xenvif *vif,
				struct xen_netif_extra_info *extras,
				int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = vif->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(vif->dev, "Missing extra info\n");
			netbk_fatal_tx_err(vif);
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			vif->tx.req_cons = ++cons;
			netdev_err(vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			netbk_fatal_tx_err(vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		vif->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}

static int netbk_set_skb_gso(struct xenvif *vif,
			     struct sk_buff *skb,
			     struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		netbk_fatal_tx_err(vif);
		return -EINVAL;
	}

	/* Currently only TCPv4 segmentation offload is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		netbk_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}

static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
	struct iphdr *iph;
	int err = -EPROTO;
	int recalculate_partial_csum = 0;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		vif->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = 1;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	iph = (void *)skb->data;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
					  offsetof(struct tcphdr, check)))
			goto out;

		if (recalculate_partial_csum) {
			struct tcphdr *tcph = tcp_hdr(skb);
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_TCP, 0);
		}
		break;
	case IPPROTO_UDP:
		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
					  offsetof(struct udphdr, check)))
			goto out;

		if (recalculate_partial_csum) {
			struct udphdr *udph = udp_hdr(skb);
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_UDP, 0);
		}
		break;
	default:
		if (net_ratelimit())
			netdev_err(vif->dev,
				   "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
				   iph->protocol);
		goto out;
	}

	err = 0;

out:
	return err;
}

static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
	unsigned long now = jiffies;
	unsigned long next_credit =
		vif->credit_timeout.expires +
		msecs_to_jiffies(vif->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&vif->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq(now, next_credit)) {
		vif->credit_timeout.expires = now;
		tx_add_credit(vif);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > vif->remaining_credit) {
		vif->credit_timeout.data     =
			(unsigned long)vif;
		vif->credit_timeout.function =
			tx_credit_callback;
		mod_timer(&vif->credit_timeout,
			  next_credit);

		return true;
	}

	return false;
}

static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
{
	struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
	struct sk_buff *skb;
	int ret;

	while ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
		< MAX_PENDING_REQS) &&
		!list_empty(&netbk->net_schedule_list)) {
		struct xenvif *vif;
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[max_skb_slots];
		struct page *page;
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		/* Get a netif from the list with work to do. */
		vif = poll_net_schedule_list(netbk);
		/* This can sometimes happen because the test of
		 * list_empty(net_schedule_list) at the top of the
		 * loop is unlocked. Just go back and have another
		 * look.
		 */
		if (!vif)
			continue;

		if (vif->tx.sring->req_prod - vif->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   vif->tx.sring->req_prod, vif->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			netbk_fatal_tx_err(vif);
			continue;
		}

		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
		if (!work_to_do) {
			xenvif_put(vif);
			continue;
		}

		idx = vif->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));

		/* Credit-based scheduling. */
		if (txreq.size > vif->remaining_credit &&
		    tx_credit_exceeded(vif, txreq.size)) {
			xenvif_put(vif);
			continue;
		}

		vif->remaining_credit -= txreq.size;

		work_to_do--;
		vif->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xen_netbk_get_extras(vif, extras,
							  work_to_do);
			idx = vif->tx.req_cons;
			if (unlikely(work_to_do < 0))
				continue;
		}

		ret = netbk_count_requests(vif, &txreq, idx,
					   txfrags, work_to_do);
		if (unlikely(ret < 0))
			continue;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
			netdev_err(vif->dev,
				   "txreq.offset: %x, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (txreq.offset&~PAGE_MASK) + txreq.size);
			netbk_fatal_tx_err(vif);
			continue;
		}

		index = pending_index(netbk->pending_cons);
		pending_idx = netbk->pending_ring[index];

		data_len = (txreq.size > PKT_PROT_LEN &&
			    ret < XEN_NETIF_NR_SLOTS_MIN) ?
			PKT_PROT_LEN : txreq.size;

		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
				GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(skb == NULL)) {
			netdev_dbg(vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			netbk_tx_err(vif, &txreq, idx);
			break;
		}

		/* Packets passed to netif_rx() must have some headroom. */
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (netbk_set_skb_gso(vif, skb, gso)) {
				/* Failure in netbk_set_skb_gso is fatal. */
				kfree_skb(skb);
				continue;
			}
		}

		/* XXX could copy straight to head */
		page = xen_netbk_alloc_page(netbk, pending_idx);
		if (!page) {
			kfree_skb(skb);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}

		gop->source.u.ref = txreq.gref;
		gop->source.domid = vif->domid;
		gop->source.offset = txreq.offset;

		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
		gop->dest.domid = DOMID_SELF;
		gop->dest.offset = txreq.offset;

		gop->len = txreq.size;
		gop->flags = GNTCOPY_source_gref;

		gop++;

		memcpy(&netbk->pending_tx_info[pending_idx].req,
		       &txreq, sizeof(txreq));
		netbk->pending_tx_info[pending_idx].vif = vif;
		netbk->pending_tx_info[pending_idx].head = index;
		*((u16 *)skb->data) = pending_idx;

		__skb_put(skb, data_len);

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size) {
			skb_shinfo(skb)->nr_frags++;
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
		}

		netbk->pending_cons++;

		request_gop = xen_netbk_get_requests(netbk, vif,
						     skb, txfrags, gop);
		if (request_gop == NULL) {
			kfree_skb(skb);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}
		gop = request_gop;

		__skb_queue_tail(&netbk->tx_queue, skb);

		vif->tx.req_cons = idx;
		xen_netbk_check_rx_xenvif(vif);

		if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
			break;
	}

	return gop - netbk->tx_copy_ops;
}

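/*
 * Batch bound: each pending slot consumed accounts for at most two
 * gnttab_copy entries (a coalesced slot can straddle the destination page),
 * and the while condition keeps enough pending slots free per packet, so
 * tx_copy_ops[2*MAX_PENDING_REQS] cannot overflow; the final check merely
 * stops building the batch once the array is full.
 */
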
static void xen_netbk_tx_submit(struct xen_netbk *netbk)
{
	struct gnttab_copy *gop = netbk->tx_copy_ops;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		struct xenvif *vif;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = *((u16 *)skb->data);
		vif = netbk->pending_tx_info[pending_idx].vif;
		txp = &netbk->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
			netdev_dbg(vif->dev, "netback grant failed.\n");
			skb_shinfo(skb)->nr_frags = 0;
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		memcpy(skb->data,
		       (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
		       data_len);
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xen_netbk_fill_frags(netbk, skb);

		/*
		 * If the initial fragment was < PKT_PROT_LEN then
		 * pull through some bytes from the other fragments to
		 * increase the linear region to PKT_PROT_LEN bytes.
		 */
		if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
			int target = min_t(int, skb->len, PKT_PROT_LEN);
			__pskb_pull_tail(skb, target - skb_headlen(skb));
		}

		skb->dev      = vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(vif, skb)) {
			netdev_dbg(vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		vif->dev->stats.rx_bytes += skb->len;
		vif->dev->stats.rx_packets++;

		xenvif_receive_skb(vif, skb);
	}
}

/* Called after netfront has transmitted */
static void xen_netbk_tx_action(struct xen_netbk *netbk)
{
	unsigned nr_gops;

	nr_gops = xen_netbk_tx_build_gops(netbk);

	if (nr_gops == 0)
		return;

	gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);

	xen_netbk_tx_submit(netbk);
}

static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
				  u8 status)
{
	struct xenvif *vif;
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t head;
	u16 peek; /* peek into next tx request */

	BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));

	/* Already complete? */
	if (netbk->mmap_pages[pending_idx] == NULL)
		return;

	pending_tx_info = &netbk->pending_tx_info[pending_idx];

	vif = pending_tx_info->vif;
	head = pending_tx_info->head;

	BUG_ON(!pending_tx_is_head(netbk, head));
	BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);

	do {
		pending_ring_idx_t index;
		pending_ring_idx_t idx = pending_index(head);
		u16 info_idx = netbk->pending_ring[idx];

		pending_tx_info = &netbk->pending_tx_info[info_idx];
		make_tx_response(vif, &pending_tx_info->req, status);

		/* Setting any number other than
		 * INVALID_PENDING_RING_IDX indicates this slot is
		 * starting a new packet / ending a previous packet.
		 */
		pending_tx_info->head = 0;

		index = pending_index(netbk->pending_prod++);
		netbk->pending_ring[index] = netbk->pending_ring[info_idx];

		xenvif_put(vif);

		peek = netbk->pending_ring[pending_index(++head)];

	} while (!pending_tx_is_head(netbk, peek));

	netbk->mmap_pages[pending_idx]->mapping = 0;
	put_page(netbk->mmap_pages[pending_idx]);
	netbk->mmap_pages[pending_idx] = NULL;
}

static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8       st)
{
	RING_IDX i = vif->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;
	int notify;

	resp = RING_GET_RESPONSE(&vif->tx, i);
	resp->id     = txp->id;
	resp->status = st;

	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	vif->tx.rsp_prod_pvt = ++i;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
	if (notify)
		notify_remote_via_irq(vif->irq);
}

static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
					     u16      id,
					     s8       st,
					     u16      offset,
					     u16      size,
					     u16      flags)
{
	RING_IDX i = vif->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&vif->rx, i);
	resp->offset = offset;
	resp->flags  = flags;
	resp->id     = id;
	resp->status = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	vif->rx.rsp_prod_pvt = ++i;

	return resp;
}

static inline int rx_work_todo(struct xen_netbk *netbk)
{
	return !skb_queue_empty(&netbk->rx_queue);
}

static inline int tx_work_todo(struct xen_netbk *netbk)
{
	if ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
	     < MAX_PENDING_REQS) &&
	    !list_empty(&netbk->net_schedule_list))
		return 1;

	return 0;
}

static int xen_netbk_kthread(void *data)
{
	struct xen_netbk *netbk = data;
	while (!kthread_should_stop()) {
		wait_event_interruptible(netbk->wq,
				rx_work_todo(netbk) ||
				tx_work_todo(netbk) ||
				kthread_should_stop());
		cond_resched();

		if (kthread_should_stop())
			break;

		if (rx_work_todo(netbk))
			xen_netbk_rx_action(netbk);

		if (tx_work_todo(netbk))
			xen_netbk_tx_action(netbk);
	}

	return 0;
}

void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
{
	if (vif->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->tx.sring);
	if (vif->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->rx.sring);
}

int xen_netbk_map_frontend_rings(struct xenvif *vif,
				 grant_ref_t tx_ring_ref,
				 grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     tx_ring_ref, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     rx_ring_ref, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);

	vif->rx_req_cons_peek = 0;

	return 0;

err:
	xen_netbk_unmap_frontend_rings(vif);
	return err;
}

static int __init netback_init(void)
{
	int i;
	int rc = 0;
	int group;

	if (!xen_domain())
		return -ENODEV;

	if (max_skb_slots < XEN_NETIF_NR_SLOTS_MIN) {
		printk(KERN_INFO
		       "xen-netback: max_skb_slots too small (%d), bump it to XEN_NETIF_NR_SLOTS_MIN (%d)\n",
		       max_skb_slots, XEN_NETIF_NR_SLOTS_MIN);
		max_skb_slots = XEN_NETIF_NR_SLOTS_MIN;
	}

	xen_netbk_group_nr = num_online_cpus();
	xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
	if (!xen_netbk)
		return -ENOMEM;

	for (group = 0; group < xen_netbk_group_nr; group++) {
		struct xen_netbk *netbk = &xen_netbk[group];
		skb_queue_head_init(&netbk->rx_queue);
		skb_queue_head_init(&netbk->tx_queue);

		init_timer(&netbk->net_timer);
		netbk->net_timer.data = (unsigned long)netbk;
		netbk->net_timer.function = xen_netbk_alarm;

		netbk->pending_cons = 0;
		netbk->pending_prod = MAX_PENDING_REQS;
		for (i = 0; i < MAX_PENDING_REQS; i++)
			netbk->pending_ring[i] = i;
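
		/*
		 * With pending_prod == MAX_PENDING_REQS and
		 * pending_cons == 0 the pending ring starts completely
		 * free (nr_pending_reqs() == 0), and the identity
		 * initialisation hands out pending indices 0..255 in
		 * order as requests arrive.
		 */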

		init_waitqueue_head(&netbk->wq);
		netbk->task = kthread_create(xen_netbk_kthread,
					     (void *)netbk,
					     "netback/%u", group);

		if (IS_ERR(netbk->task)) {
			printk(KERN_ALERT "kthread_create() fails at netback\n");
			del_timer(&netbk->net_timer);
			rc = PTR_ERR(netbk->task);
			goto failed_init;
		}

		kthread_bind(netbk->task, group);

		INIT_LIST_HEAD(&netbk->net_schedule_list);

		spin_lock_init(&netbk->net_schedule_list_lock);

		atomic_set(&netbk->netfront_count, 0);

		wake_up_process(netbk->task);
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

failed_init:
	while (--group >= 0) {
		struct xen_netbk *netbk = &xen_netbk[group];
		for (i = 0; i < MAX_PENDING_REQS; i++) {
			if (netbk->mmap_pages[i])
				__free_page(netbk->mmap_pages[i]);
		}
		del_timer(&netbk->net_timer);
		kthread_stop(netbk->task);
	}
	vfree(xen_netbk);
	return rc;
}

module_init(netback_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");