Commit 4875bf14 authored by Vincenzo Maffione, committed by Jason Wang
Browse files

net: netmap: improve netmap_receive_iov()



Changes:
  - Save CPU cycles by computing the return value while scanning the
    input iovec, rather than calling iov_size() at the end.
  - Remove check for s->tx != NULL, because it cannot happen.
  - Cache ring->tail in a local variable and use it to check for
    space in the TX ring. The use of nm_ring_empty() was invalid,
    because nobody is updating ring->cur and ring->head at that point.
  - In case we run out of netmap slots in the middle of a packet,
    move the wake-up point by advancing ring->cur, but do not
    expose the incomplete packet (i.e., by updating also ring->head).

Signed-off-by: Vincenzo Maffione <v.maffione@gmail.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
parent c7cbb6b4
Loading
Loading
Loading
Loading
+17 −12
Original line number Diff line number Diff line
@@ -159,21 +159,22 @@ static ssize_t netmap_receive_iov(NetClientState *nc,
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
    struct netmap_ring *ring = s->tx;
    unsigned int tail = ring->tail;
    ssize_t totlen = 0;
    uint32_t last;
    uint32_t idx;
    uint8_t *dst;
    int j;
    uint32_t i;

    if (unlikely(!ring)) {
        /* Drop the packet. */
        return iov_size(iov, iovcnt);
    }

    last = i = ring->cur;
    last = i = ring->head;

    if (nm_ring_space(ring) < iovcnt) {
        /* Not enough netmap slots. */
        /* Not enough netmap slots. Tell the kernel that we have seen the new
         * available slots (so that it notifies us again when it has more
         * ones), but without publishing any new slots to be processed
         * (e.g., we don't advance ring->head). */
        ring->cur = tail;
        netmap_write_poll(s, true);
        return 0;
    }
@@ -183,14 +184,17 @@ static ssize_t netmap_receive_iov(NetClientState *nc,
        int offset = 0;
        int nm_frag_size;

        totlen += iov_frag_size;

        /* Split each iovec fragment over more netmap slots, if
           necessary. */
        while (iov_frag_size) {
            nm_frag_size = MIN(iov_frag_size, ring->nr_buf_size);

            if (unlikely(nm_ring_empty(ring))) {
                /* We run out of netmap slots while splitting the
            if (unlikely(i == tail)) {
                /* We ran out of netmap slots while splitting the
                   iovec fragments. */
                ring->cur = tail;
                netmap_write_poll(s, true);
                return 0;
            }
@@ -212,12 +216,13 @@ static ssize_t netmap_receive_iov(NetClientState *nc,
    /* The last slot must not have NS_MOREFRAG set. */
    ring->slot[last].flags &= ~NS_MOREFRAG;

    /* Now update ring->cur and ring->head. */
    ring->cur = ring->head = i;
    /* Now update ring->head and ring->cur to publish the new slots and
     * the new wakeup point. */
    ring->head = ring->cur = i;

    ioctl(s->nmd->fd, NIOCTXSYNC, NULL);

    return iov_size(iov, iovcnt);
    return totlen;
}

static ssize_t netmap_receive(NetClientState *nc,