Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r-- | drivers/net/xen-netback/netback.c | 468
1 file changed, 316 insertions, 152 deletions
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1260bf0..b2528f6 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -46,11 +46,33 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
 
+/*
+ * This is the maximum slots a skb can have. If a guest sends a skb
+ * which exceeds this limit it is considered malicious.
+ */
+#define FATAL_SKB_SLOTS_DEFAULT 20
+static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
+module_param(fatal_skb_slots, uint, 0444);
+
+/*
+ * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
+ * the maximum slots a valid packet can use. Now this value is defined
+ * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
+ * all backends.
+ */
+#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
+
+typedef unsigned int pending_ring_idx_t;
+#define INVALID_PENDING_RING_IDX (~0U)
+
 struct pending_tx_info {
-        struct xen_netif_tx_request req;
+        struct xen_netif_tx_request req; /* coalesced tx request */
         struct xenvif *vif;
+        pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
+                                  * if it is head of one or more tx
+                                  * reqs
+                                  */
 };
-typedef unsigned int pending_ring_idx_t;
 
 struct netbk_rx_meta {
         int id;
@@ -60,6 +82,9 @@ struct netbk_rx_meta {
 
 #define MAX_PENDING_REQS 256
 
+/* Discriminate from any valid pending_idx value. */
+#define INVALID_PENDING_IDX 0xFFFF
+
 #define MAX_BUFFER_OFFSET PAGE_SIZE
 
 /* extra field used in struct page */
@@ -98,7 +123,11 @@ struct xen_netbk {
         atomic_t netfront_count;
 
         struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
-        struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
+        /* Coalescing tx requests before copying makes the number of
+         * grant copy ops greater than or equal to the number of slots
+         * required. In the worst case a tx request consumes 2 gnttab_copy.
+         */
+        struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
 
         u16 pending_ring[MAX_PENDING_REQS];
 
@@ -114,6 +143,16 @@ struct xen_netbk {
 static struct xen_netbk *xen_netbk;
 static int xen_netbk_group_nr;
 
+/*
+ * If head != INVALID_PENDING_RING_IDX, it means this tx request is the head
+ * of one or more merged tx requests; otherwise it is the continuation of
+ * the previous tx request.
+ */
+static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
+{
+        return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
+}
+
 void xen_netbk_add_xenvif(struct xenvif *vif)
 {
         int i;
@@ -156,13 +195,13 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
                                              u16      flags);
 
 static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
-                                       unsigned int idx)
+                                       u16 idx)
 {
         return page_to_pfn(netbk->mmap_pages[idx]);
 }
 
 static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
-                                         unsigned int idx)
+                                         u16 idx)
 {
         return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
 }
@@ -216,6 +255,16 @@ static int get_page_ext(struct page *pg,
                          sizeof(struct iphdr) + MAX_IPOPTLEN + \
                          sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
 
+static u16 frag_get_pending_idx(skb_frag_t *frag)
+{
+        return (u16)frag->page_offset;
+}
+
+static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
+{
+        frag->page_offset = pending_idx;
+}
+
 static inline pending_ring_idx_t pending_index(unsigned i)
 {
         return i & (MAX_PENDING_REQS-1);
@@ -236,6 +285,7 @@ static int max_required_rx_slots(struct xenvif *vif)
 {
         int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
 
+        /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
         if (vif->can_sg || vif->gso || vif->gso_prefix)
                 max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
 
@@ -296,8 +346,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
          * into multiple copies tend to give large frags their
          * own buffers as before.
          */
-        if ((offset + size > MAX_BUFFER_OFFSET) &&
-            (size <= MAX_BUFFER_OFFSET) && offset && !head)
+        BUG_ON(size > MAX_BUFFER_OFFSET);
+        if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
                 return true;
 
         return false;
@@ -322,7 +372,7 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
                 count++;
 
         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                unsigned long size = skb_shinfo(skb)->frags[i].size;
+                unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
                 unsigned long bytes;
                 while (size > 0) {
                         BUG_ON(copy_off > MAX_BUFFER_OFFSET);
@@ -513,8 +563,8 @@ static int netbk_gop_skb(struct sk_buff *skb,
 
         for (i = 0; i < nr_frags; i++) {
                 netbk_gop_frag_copy(vif, skb, npo,
-                                    skb_shinfo(skb)->frags[i].page,
-                                    skb_shinfo(skb)->frags[i].size,
+                                    skb_frag_page(&skb_shinfo(skb)->frags[i]),
+                                    skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                     skb_shinfo(skb)->frags[i].page_offset,
                                     &head);
         }
@@ -614,6 +664,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
                 __skb_queue_tail(&rxq, skb);
 
                 /* Filled the batch queue? */
+                /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
                 if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
                         break;
         }
@@ -861,48 +912,100 @@ static int netbk_count_requests(struct xenvif *vif,
                                 int work_to_do)
 {
         RING_IDX cons = vif->tx.req_cons;
-        int frags = 0;
+        int slots = 0;
+        int drop_err = 0;
+        int more_data;
 
         if (!(first->flags & XEN_NETTXF_more_data))
                 return 0;
 
         do {
-                if (frags >= work_to_do) {
-                        netdev_err(vif->dev, "Need more frags\n");
+                struct xen_netif_tx_request dropped_tx = { 0 };
+
+                if (slots >= work_to_do) {
+                        netdev_err(vif->dev,
+                                   "Asked for %d slots but exceeds this limit\n",
                                   work_to_do);
                         netbk_fatal_tx_err(vif);
                         return -ENODATA;
                 }
 
-                if (unlikely(frags >= MAX_SKB_FRAGS)) {
-                        netdev_err(vif->dev, "Too many frags\n");
+                /* This guest is really using too many slots and is
+                 * considered malicious.
+                 */
+                if (unlikely(slots >= fatal_skb_slots)) {
+                        netdev_err(vif->dev,
                                   "Malicious frontend using %d slots, threshold %u\n",
                                   slots, fatal_skb_slots);
                         netbk_fatal_tx_err(vif);
                         return -E2BIG;
                 }
 
-                memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
+                /* The Xen network protocol had an implicit dependency on
+                 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
+                 * the historical MAX_SKB_FRAGS value 18 to honor the
+                 * same behavior as before. Any packet using more than
+                 * 18 slots but less than fatal_skb_slots slots is
+                 * dropped.
+                 */
+                if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
+                        if (net_ratelimit())
+                                netdev_dbg(vif->dev,
                                           "Too many slots (%d) exceeding limit (%d), dropping packet\n",
                                           slots, XEN_NETBK_LEGACY_SLOTS_MAX);
+                        drop_err = -E2BIG;
+                }
+
+                if (drop_err)
+                        txp = &dropped_tx;
+
+                memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
                        sizeof(*txp));
-                if (txp->size > first->size) {
-                        netdev_err(vif->dev, "Frag is bigger than frame.\n");
-                        netbk_fatal_tx_err(vif);
-                        return -EIO;
+
+                /* If the guest submitted a frame >= 64 KiB then
+                 * first->size overflowed and following slots will
+                 * appear to be larger than the frame.
+                 *
+                 * This cannot be a fatal error as there are buggy
+                 * frontends that do this.
+                 *
+                 * Consume all slots and drop the packet.
+                 */
+                if (!drop_err && txp->size > first->size) {
+                        if (net_ratelimit())
+                                netdev_dbg(vif->dev,
                                           "Invalid tx request, slot size %u > remaining size %u\n",
                                           txp->size, first->size);
+                        drop_err = -EIO;
                 }
 
                 first->size -= txp->size;
-                frags++;
+                slots++;
 
                 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-                        netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
+                        netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
                                    txp->offset, txp->size);
                         netbk_fatal_tx_err(vif);
                         return -EINVAL;
                 }
-        } while ((txp++)->flags & XEN_NETTXF_more_data);
-        return frags;
+
+                more_data = txp->flags & XEN_NETTXF_more_data;
+
+                if (!drop_err)
+                        txp++;
+
+        } while (more_data);
+
+        if (drop_err) {
+                netbk_tx_err(vif, first, cons + slots);
+                return drop_err;
+        }
+
+        return slots;
 }
 
 static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
-                                         struct sk_buff *skb,
-                                         unsigned long pending_idx)
+                                         u16 pending_idx)
 {
         struct page *page;
         page = alloc_page(GFP_KERNEL|__GFP_COLD);
@@ -921,51 +1024,115 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 {
         struct skb_shared_info *shinfo = skb_shinfo(skb);
         skb_frag_t *frags = shinfo->frags;
-        unsigned long pending_idx = *((u16 *)skb->data);
-        int i, start;
+        u16 pending_idx = *((u16 *)skb->data);
+        u16 head_idx = 0;
+        int slot, start;
+        struct page *page;
+        pending_ring_idx_t index, start_idx = 0;
+        uint16_t dst_offset;
+        unsigned int nr_slots;
+        struct pending_tx_info *first = NULL;
+
+        /* At this point shinfo->nr_frags is in fact the number of
+         * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+         */
+        nr_slots = shinfo->nr_frags;
 
         /* Skip first skb fragment if it is on same page as header fragment. */
-        start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+        start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
-        for (i = start; i < shinfo->nr_frags; i++, txp++) {
-                struct page *page;
-                pending_ring_idx_t index;
+        /* Coalesce tx requests, at this point the packet passed in
+         * should be <= 64K. Any packets larger than 64K have been
+         * handled in netbk_count_requests().
+         */
+        for (shinfo->nr_frags = slot = start; slot < nr_slots;
+             shinfo->nr_frags++) {
                 struct pending_tx_info *pending_tx_info =
                         netbk->pending_tx_info;
 
-                index = pending_index(netbk->pending_cons++);
-                pending_idx = netbk->pending_ring[index];
-                page = xen_netbk_alloc_page(netbk, skb, pending_idx);
+                page = alloc_page(GFP_KERNEL|__GFP_COLD);
                 if (!page)
                         goto err;
 
-                netbk->mmap_pages[pending_idx] = page;
-
-                gop->source.u.ref = txp->gref;
-                gop->source.domid = vif->domid;
-                gop->source.offset = txp->offset;
-
-                gop->dest.u.gmfn = virt_to_mfn(page_address(page));
-                gop->dest.domid = DOMID_SELF;
-                gop->dest.offset = txp->offset;
-
-                gop->len = txp->size;
-                gop->flags = GNTCOPY_source_gref;
+                dst_offset = 0;
+                first = NULL;
+                while (dst_offset < PAGE_SIZE && slot < nr_slots) {
+                        gop->flags = GNTCOPY_source_gref;
+
+                        gop->source.u.ref = txp->gref;
+                        gop->source.domid = vif->domid;
+                        gop->source.offset = txp->offset;
+
+                        gop->dest.domid = DOMID_SELF;
+
+                        gop->dest.offset = dst_offset;
+                        gop->dest.u.gmfn = virt_to_mfn(page_address(page));
+
+                        if (dst_offset + txp->size > PAGE_SIZE) {
+                                /* This page can only merge a portion
+                                 * of the tx request. Do not increment any
+                                 * pointer / counter here. The txp
+                                 * will be dealt with in future
+                                 * rounds, eventually hitting the
+                                 * `else` branch.
+                                 */
+                                gop->len = PAGE_SIZE - dst_offset;
+                                txp->offset += gop->len;
+                                txp->size -= gop->len;
+                                dst_offset += gop->len; /* quit loop */
+                        } else {
+                                /* This tx request can be merged in the page */
+                                gop->len = txp->size;
+                                dst_offset += gop->len;
+
+                                index = pending_index(netbk->pending_cons++);
+
+                                pending_idx = netbk->pending_ring[index];
+
+                                memcpy(&pending_tx_info[pending_idx].req, txp,
+                                       sizeof(*txp));
+                                xenvif_get(vif);
+
+                                pending_tx_info[pending_idx].vif = vif;
+
+                                /* Poison these fields, corresponding
+                                 * fields for the head tx req will be set
+                                 * to correct values after the loop.
+                                 */
+                                netbk->mmap_pages[pending_idx] = (void *)(~0UL);
+                                pending_tx_info[pending_idx].head =
+                                        INVALID_PENDING_RING_IDX;
+
+                                if (!first) {
+                                        first = &pending_tx_info[pending_idx];
+                                        start_idx = index;
+                                        head_idx = pending_idx;
+                                }
+
+                                txp++;
+                                slot++;
+                        }
 
-                gop++;
+                        gop++;
+                }
 
-                memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
-                xenvif_get(vif);
-                pending_tx_info[pending_idx].vif = vif;
-                frags[i].page = (void *)pending_idx;
+                first->req.offset = 0;
+                first->req.size = dst_offset;
+                first->head = start_idx;
+                set_page_ext(page, netbk, head_idx);
+                netbk->mmap_pages[head_idx] = page;
+                frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
         }
 
+        BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
+
         return gop;
 err:
         /* Unwind, freeing all pages and sending error responses. */
-        while (i-- > start) {
-                xen_netbk_idx_release(netbk, (unsigned long)shinfo->frags[i].page,
-                                      XEN_NETIF_RSP_ERROR);
+        while (shinfo->nr_frags-- > start) {
+                xen_netbk_idx_release(netbk,
+                                frag_get_pending_idx(&frags[shinfo->nr_frags]),
+                                XEN_NETIF_RSP_ERROR);
         }
         /* The head too, if necessary. */
         if (start)
@@ -979,10 +1146,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
                               struct gnttab_copy **gopp)
 {
         struct gnttab_copy *gop = *gopp;
-        int pending_idx = *((u16 *)skb->data);
+        u16 pending_idx = *((u16 *)skb->data);
         struct skb_shared_info *shinfo = skb_shinfo(skb);
+        struct pending_tx_info *tx_info;
         int nr_frags = shinfo->nr_frags;
         int i, err, start;
+        u16 peek; /* peek into next tx request */
 
         /* Check status of header. */
         err = gop->status;
@@ -990,15 +1159,24 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
                 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
         /* Skip first skb fragment if it is on same page as header fragment. */
-        start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+        start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
         for (i = start; i < nr_frags; i++) {
                 int j, newerr;
+                pending_ring_idx_t head;
 
-                pending_idx = (unsigned long)shinfo->frags[i].page;
+                pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+                tx_info = &netbk->pending_tx_info[pending_idx];
+                head = tx_info->head;
 
                 /* Check error status: if okay then remember grant handle. */
-                newerr = (++gop)->status;
+                do {
+                        newerr = (++gop)->status;
+                        if (newerr)
+                                break;
+                        peek = netbk->pending_ring[pending_index(++head)];
+                } while (!pending_tx_is_head(netbk, peek));
+
                 if (likely(!newerr)) {
                         /* Had a previous error? Invalidate this fragment. */
                         if (unlikely(err))
@@ -1017,7 +1195,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
                 pending_idx = *((u16 *)skb->data);
                 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                 for (j = start; j < i; j++) {
-                        pending_idx = (unsigned long)shinfo->frags[i].page;
+                        pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
                         xen_netbk_idx_release(netbk, pending_idx,
                                               XEN_NETIF_RSP_OKAY);
                 }
@@ -1038,15 +1216,14 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
         for (i = 0; i < nr_frags; i++) {
                 skb_frag_t *frag = shinfo->frags + i;
                 struct xen_netif_tx_request *txp;
-                unsigned long pending_idx;
+                struct page *page;
+                u16 pending_idx;
 
-                pending_idx = (unsigned long)frag->page;
+                pending_idx = frag_get_pending_idx(frag);
 
                 txp = &netbk->pending_tx_info[pending_idx].req;
-                frag->page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
-                frag->size = txp->size;
-                frag->page_offset = txp->offset;
-
+                page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
+                __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
                 skb->len += txp->size;
                 skb->data_len += txp->size;
                 skb->truesize += txp->size;
@@ -1188,9 +1365,8 @@ out:
 
 static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
 {
-        unsigned long now = jiffies;
-        unsigned long next_credit =
-                vif->credit_timeout.expires +
+        u64 now = get_jiffies_64();
+        u64 next_credit = vif->credit_window_start +
                 msecs_to_jiffies(vif->credit_usec / 1000);
 
         /* Timer could already be pending in rare cases. */
@@ -1198,8 +1374,8 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
                 return true;
 
         /* Passed the point where we can replenish credit? */
-        if (time_after_eq(now, next_credit)) {
-                vif->credit_timeout.expires = now;
+        if (time_after_eq64(now, next_credit)) {
+                vif->credit_window_start = now;
                 tx_add_credit(vif);
         }
 
@@ -1211,6 +1387,7 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
                         tx_credit_callback;
                 mod_timer(&vif->credit_timeout,
                           next_credit);
+                vif->credit_window_start = next_credit;
 
                 return true;
         }
@@ -1224,11 +1401,12 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
         struct sk_buff *skb;
         int ret;
 
-        while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
+        while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
+                < MAX_PENDING_REQS) &&
                 !list_empty(&netbk->net_schedule_list)) {
                 struct xenvif *vif;
                 struct xen_netif_tx_request txreq;
-                struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
+                struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
                 struct page *page;
                 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
                 u16 pending_idx;
@@ -1316,7 +1494,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                 pending_idx = netbk->pending_ring[index];
 
                 data_len = (txreq.size > PKT_PROT_LEN &&
-                            ret < MAX_SKB_FRAGS) ?
+                            ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
                         PKT_PROT_LEN : txreq.size;
 
                 skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
@@ -1343,15 +1521,13 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                 }
 
                 /* XXX could copy straight to head */
-                page = xen_netbk_alloc_page(netbk, skb, pending_idx);
+                page = xen_netbk_alloc_page(netbk, pending_idx);
                 if (!page) {
                         kfree_skb(skb);
                         netbk_tx_err(vif, &txreq, idx);
                         continue;
                 }
 
-                netbk->mmap_pages[pending_idx] = page;
-
                 gop->source.u.ref = txreq.gref;
                 gop->source.domid = vif->domid;
                 gop->source.offset = txreq.offset;
@@ -1368,6 +1544,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                 memcpy(&netbk->pending_tx_info[pending_idx].req,
                        &txreq, sizeof(txreq));
                 netbk->pending_tx_info[pending_idx].vif = vif;
+                netbk->pending_tx_info[pending_idx].head = index;
                 *((u16 *)skb->data) = pending_idx;
 
                 __skb_put(skb, data_len);
@@ -1375,11 +1552,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                 skb_shinfo(skb)->nr_frags = ret;
                 if (data_len < txreq.size) {
                         skb_shinfo(skb)->nr_frags++;
-                        skb_shinfo(skb)->frags[0].page =
-                                (void *)(unsigned long)pending_idx;
+                        frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
+                                             pending_idx);
                 } else {
-                        /* Discriminate from any valid pending_idx value. */
-                        skb_shinfo(skb)->frags[0].page = (void *)~0UL;
+                        frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
+                                             INVALID_PENDING_IDX);
                 }
 
                 __skb_queue_tail(&netbk->tx_queue, skb);
@@ -1498,7 +1675,10 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
 {
         struct xenvif *vif;
         struct pending_tx_info *pending_tx_info;
-        pending_ring_idx_t index;
+        pending_ring_idx_t head;
+        u16 peek; /* peek into next tx request */
+
+        BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));
 
         /* Already complete? */
         if (netbk->mmap_pages[pending_idx] == NULL)
@@ -1507,19 +1687,40 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
         pending_tx_info = &netbk->pending_tx_info[pending_idx];
         vif = pending_tx_info->vif;
+        head = pending_tx_info->head;
+
+        BUG_ON(!pending_tx_is_head(netbk, head));
+        BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);
 
-        make_tx_response(vif, &pending_tx_info->req, status);
+        do {
+                pending_ring_idx_t index;
+                pending_ring_idx_t idx = pending_index(head);
+                u16 info_idx = netbk->pending_ring[idx];
 
-        index = pending_index(netbk->pending_prod++);
-        netbk->pending_ring[index] = pending_idx;
+                pending_tx_info = &netbk->pending_tx_info[info_idx];
+                make_tx_response(vif, &pending_tx_info->req, status);
 
-        xenvif_put(vif);
+                /* Setting any number other than
+                 * INVALID_PENDING_RING_IDX indicates this slot is
+                 * starting a new packet / ending a previous packet.
+                 */
+                pending_tx_info->head = 0;
+
+                index = pending_index(netbk->pending_prod++);
+                netbk->pending_ring[index] = netbk->pending_ring[info_idx];
+
+                xenvif_put(vif);
+
+                peek = netbk->pending_ring[pending_index(++head)];
+
+        } while (!pending_tx_is_head(netbk, peek));
 
         netbk->mmap_pages[pending_idx]->mapping = 0;
         put_page(netbk->mmap_pages[pending_idx]);
         netbk->mmap_pages[pending_idx] = NULL;
 }
 
+
 static void make_tx_response(struct xenvif *vif,
                              struct xen_netif_tx_request *txp,
                              s8       st)
@@ -1572,8 +1773,9 @@ static inline int rx_work_todo(struct xen_netbk *netbk)
 
 static inline int tx_work_todo(struct xen_netbk *netbk)
 {
-        if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
-                        !list_empty(&netbk->net_schedule_list))
+        if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
+             < MAX_PENDING_REQS) &&
+            !list_empty(&netbk->net_schedule_list))
                 return 1;
 
         return 0;
@@ -1604,88 +1806,42 @@ static int xen_netbk_kthread(void *data)
 
 void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
 {
-        struct gnttab_unmap_grant_ref op;
-
-        if (vif->tx.sring) {
-                gnttab_set_unmap_op(&op, (unsigned long)vif->tx_comms_area->addr,
-                                    GNTMAP_host_map, vif->tx_shmem_handle);
-
-                if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-                        BUG();
-        }
-
-        if (vif->rx.sring) {
-                gnttab_set_unmap_op(&op, (unsigned long)vif->rx_comms_area->addr,
-                                    GNTMAP_host_map, vif->rx_shmem_handle);
-
-                if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-                        BUG();
-        }
-        if (vif->rx_comms_area)
-                free_vm_area(vif->rx_comms_area);
-        if (vif->tx_comms_area)
-                free_vm_area(vif->tx_comms_area);
+        if (vif->tx.sring)
+                xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+                                        vif->tx.sring);
+        if (vif->rx.sring)
+                xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+                                        vif->rx.sring);
 }
 
 int xen_netbk_map_frontend_rings(struct xenvif *vif,
                                  grant_ref_t tx_ring_ref,
                                  grant_ref_t rx_ring_ref)
 {
-        struct gnttab_map_grant_ref op;
+        void *addr;
         struct xen_netif_tx_sring *txs;
         struct xen_netif_rx_sring *rxs;
 
         int err = -ENOMEM;
 
-        vif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
-        if (vif->tx_comms_area == NULL)
-                goto err;
-
-        vif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
-        if (vif->rx_comms_area == NULL)
-                goto err;
-
-        gnttab_set_map_op(&op, (unsigned long)vif->tx_comms_area->addr,
-                          GNTMAP_host_map, tx_ring_ref, vif->domid);
-
-        if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-                BUG();
-
-        if (op.status) {
-                netdev_warn(vif->dev,
-                            "failed to map tx ring. err=%d status=%d\n",
-                            err, op.status);
-                err = op.status;
+        err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+                                     tx_ring_ref, &addr);
+        if (err)
                 goto err;
-        }
-
-        vif->tx_shmem_ref    = tx_ring_ref;
-        vif->tx_shmem_handle = op.handle;
 
-        txs = (struct xen_netif_tx_sring *)vif->tx_comms_area->addr;
+        txs = (struct xen_netif_tx_sring *)addr;
         BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
 
-        gnttab_set_map_op(&op, (unsigned long)vif->rx_comms_area->addr,
-                          GNTMAP_host_map, rx_ring_ref, vif->domid);
-
-        if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-                BUG();
-
-        if (op.status) {
-                netdev_warn(vif->dev,
-                            "failed to map rx ring. err=%d status=%d\n",
-                            err, op.status);
-                err = op.status;
+        err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
                                      rx_ring_ref, &addr);
+        if (err)
                 goto err;
-        }
-
-        vif->rx_shmem_ref     = rx_ring_ref;
-        vif->rx_shmem_handle  = op.handle;
-        vif->rx_req_cons_peek = 0;
 
-        rxs = (struct xen_netif_rx_sring *)vif->rx_comms_area->addr;
+        rxs = (struct xen_netif_rx_sring *)addr;
         BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
 
+        vif->rx_req_cons_peek = 0;
+
         return 0;
 
 err:
@@ -1702,6 +1858,13 @@ static int __init netback_init(void)
         if (!xen_pv_domain())
                 return -ENODEV;
 
+        if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
+                printk(KERN_INFO
+                       "xen-netback: fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
+                       fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
+                fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
+        }
+
         xen_netbk_group_nr = num_online_cpus();
         xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
         if (!xen_netbk) {
@@ -1729,7 +1892,7 @@ static int __init netback_init(void)
                                                "netback/%u", group);
 
                 if (IS_ERR(netbk->task)) {
-                        printk(KERN_ALERT "kthread_run() fails at netback\n");
+                        printk(KERN_ALERT "kthread_create() fails at netback\n");
                         del_timer(&netbk->net_timer);
                         rc = PTR_ERR(netbk->task);
                         goto failed_init;
@@ -1770,3 +1933,4 @@ failed_init:
 module_init(netback_init);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("xen-backend:vif");