
Lines Matching defs:xn (every matched line below defines or uses the per-interface mux state pointer, struct xmm_net *xn; the leading number is the line's position in the source file)

1091 static void xmm7360_mux_frame_init(struct xmm_net *xn, struct mux_frame *frame, int sequence)
1093 frame->sequence = xn->sequence;
1094 frame->max_size = xn->xmm->td_ring[0].page_size;
1189 static int xmm7360_mux_control(struct xmm_net *xn, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
1191 struct mux_frame *frame = &xn->frame;
1196 spin_lock_irqsave(&xn->lock, flags);
1198 xmm7360_mux_frame_init(xn, frame, 0);
1200 xmm7360_mux_frame_add_tag(frame, XMM_TAG_CMDH, xn->channel, cmdh_args, sizeof(cmdh_args));
1201 ret = xmm7360_mux_frame_push(xn->xmm, frame);
1203 spin_unlock_irqrestore(&xn->lock, flags);
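
Read together, the matches from 1189-1203 give the whole control path: build a fresh frame under the lock, attach a CMDH tag addressed to the channel, and push it at the modem. A minimal sketch follows; the cmdh_args packing and the local declarations fall on elided lines (1190-1195), so they are assumptions here.

static int xmm7360_mux_control(struct xmm_net *xn, u32 arg1, u32 arg2,
                               u32 arg3, u32 arg4)
{
    struct mux_frame *frame = &xn->frame;
    u32 cmdh_args[] = {arg1, arg2, arg3, arg4};    /* packing assumed */
    unsigned long flags;
    int ret;

    spin_lock_irqsave(&xn->lock, flags);

    xmm7360_mux_frame_init(xn, frame, 0);
    xmm7360_mux_frame_add_tag(frame, XMM_TAG_CMDH, xn->channel,
                              cmdh_args, sizeof(cmdh_args));
    ret = xmm7360_mux_frame_push(xn->xmm, frame);

    spin_unlock_irqrestore(&xn->lock, flags);

    return ret;
}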
1208 static void xmm7360_net_flush(struct xmm_net *xn)
1210 struct mux_frame *frame = &xn->frame;
1216 BUG_ON(skb_queue_empty(&xn->queue));
1218 BUG_ON(!xmm7360_qp_can_write(xn->qp));
1220 xmm7360_mux_frame_init(xn, frame, xn->sequence++);
1223 xmm7360_os_handle_net_dequeue(xn, frame);
1224 xn->queued_packets = xn->queued_bytes = 0;
1226 xmm7360_mux_frame_add_tag(frame, XMM_TAG_ADTH, xn->channel, &unknown, sizeof(uint32_t));
1229 ret = xmm7360_mux_frame_push(xn->xmm, frame);
1236 dev_err(xn->xmm->dev, "Failed to ship coalesced frame\n");
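
The flush path is almost fully visible in the matches from 1208-1236: take the next sequence number, drain the queued skbs into the frame, reset the accounting, cap the frame with an ADTH tag, and push. A sketch, assuming `unknown` is a zero-initialised uint32_t scratch word (its declaration sits on an elided line):

static void xmm7360_net_flush(struct xmm_net *xn)
{
    struct mux_frame *frame = &xn->frame;
    uint32_t unknown = 0;    /* assumed: zeroed word the ADTH tag carries */
    int ret;

    /* Callers must guarantee queued work and ring space. */
    BUG_ON(skb_queue_empty(&xn->queue));
    BUG_ON(!xmm7360_qp_can_write(xn->qp));

    xmm7360_mux_frame_init(xn, frame, xn->sequence++);
    xmm7360_os_handle_net_dequeue(xn, frame);    /* drain skbs into the frame */
    xn->queued_packets = xn->queued_bytes = 0;
    xmm7360_mux_frame_add_tag(frame, XMM_TAG_ADTH, xn->channel,
                              &unknown, sizeof(uint32_t));

    ret = xmm7360_mux_frame_push(xn->xmm, frame);
    if (ret)
        dev_err(xn->xmm->dev, "Failed to ship coalesced frame\n");
}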
1275 static void xmm7360_net_mux_handle_frame(struct xmm_net *xn, u8 *data, int len)
1287 dev_info(xn->xmm->dev, "Unexpected tag %x\n", first->tag);
1293 dev_err(xn->xmm->dev, "Unexpected tag %x, expected ADTH\n", adth->tag);
1305 xmm7360_os_handle_net_frame(xn->xmm,
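
On receive, lines 1275-1305 sketch the demux: validate the outer header, find the ADTH block, then hand each bounded packet to the OS glue. The reconstruction below assumes the struct field names (next, length, offset) and the outer XMM_TAG_ADBH check implied by the two error messages; any byte-order conversion on the tags is omitted.

static void xmm7360_net_mux_handle_frame(struct xmm_net *xn, u8 *data, int len)
{
    struct mux_first_header *first = (void *)data;
    struct mux_next_header *adth;
    struct mux_bounds *bounds;
    int n_packets, i;

    if (len < (int)sizeof(*first))
        return;    /* truncated frame */

    if (first->tag != XMM_TAG_ADBH) {    /* outer tag value assumed */
        dev_info(xn->xmm->dev, "Unexpected tag %x\n", first->tag);
        return;
    }

    adth = (void *)&data[first->next];   /* 'next' field name assumed */
    if (adth->tag != XMM_TAG_ADTH) {
        dev_err(xn->xmm->dev, "Unexpected tag %x, expected ADTH\n", adth->tag);
        return;
    }

    /* The ADTH block is followed by one bounds record per packet. */
    n_packets = (adth->length - sizeof(*adth)) / sizeof(*bounds);
    bounds = (void *)&data[first->next + sizeof(*adth)];

    for (i = 0; i < n_packets; i++) {
        if (!bounds[i].length)
            continue;
        xmm7360_os_handle_net_frame(xn->xmm,
                                    &data[bounds[i].offset],
                                    bounds[i].length);
    }
}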
1315 struct xmm_net *xn = xmm->net;
1318 BUG_ON(!xn);
1320 qp = xn->qp;
1323 spin_lock_irqsave(&xn->lock, flags);
1326 xmm7360_os_handle_net_txwake(xn);
1331 xmm7360_net_mux_handle_frame(xn, ring->pages[idx], nread);
1338 spin_unlock_irqrestore(&xn->lock, flags);
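
Lines 1315-1338 are the completion handler: under xn->lock it first wakes a stalled TX queue once descriptors free up, then feeds every finished RX page through the mux parser. The ring walk itself (1324-1337) is elided, so the sketch wraps it in a hypothetical xmm7360_qp_next_rx() helper:

static void xmm7360_net_poll(struct xmm_dev *xmm)    /* name assumed */
{
    struct xmm_net *xn = xmm->net;
    struct queue_pair *qp;
    struct td_ring *ring;
    unsigned long flags;
    int idx, nread;

    BUG_ON(!xn);
    qp = xn->qp;

    spin_lock_irqsave(&xn->lock, flags);

    /* TX: descriptors were reclaimed, so a stopped queue may resume. */
    if (xmm7360_qp_can_write(qp))
        xmm7360_os_handle_net_txwake(xn);

    /* RX: hand each completed page to the mux parser.
     * xmm7360_qp_next_rx() stands in for the elided ring-walking code. */
    while (xmm7360_qp_next_rx(qp, &ring, &idx, &nread))
        xmm7360_net_mux_handle_frame(xn, ring->pages[idx], nread);

    spin_unlock_irqrestore(&xn->lock, flags);
}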
1349 struct xmm_net *xn = netdev_priv(dev);
1350 xn->queued_packets = xn->queued_bytes = 0;
1351 skb_queue_purge(&xn->queue);
1353 return xmm7360_mux_control(xn, 1, 0, 0, 0);
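
Lines 1349-1353 are ndo_open: reset the coalescing counters, drop anything still queued, and issue mux_control(1, 0, 0, 0), which this call site suggests is the "open channel" command. A sketch, with the netif_start_queue() call assumed as the standard open step:

static int xmm7360_net_open(struct net_device *dev)
{
    struct xmm_net *xn = netdev_priv(dev);

    /* Start from a clean coalescing state. */
    xn->queued_packets = xn->queued_bytes = 0;
    skb_queue_purge(&xn->queue);

    netif_start_queue(dev);    /* assumed; standard ndo_open step */

    /* Inferred from this call site: (1, 0, 0, 0) opens the channel. */
    return xmm7360_mux_control(xn, 1, 0, 0, 0);
}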
1362 static int xmm7360_net_must_flush(struct xmm_net *xn, int new_packet_bytes)
1365 if (xn->queued_packets >= MUX_MAX_PACKETS)
1368 frame_size = sizeof(struct mux_first_header) + xn->queued_bytes + sizeof(struct mux_next_header) + 4 + sizeof(struct mux_bounds)*xn->queued_packets;
1372 return frame_size > xn->frame.max_size;
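
The flush heuristic at 1362-1372 is worth spelling out: flush when the bounds array is full, or when first header plus queued payload plus ADTH header plus 4 bytes plus one bounds entry per packet would overflow a TD page. The elided lines 1369-1371 presumably fold new_packet_bytes into the size check; the sketch shows one plausible way, flagged as an assumption:

static int xmm7360_net_must_flush(struct xmm_net *xn, int new_packet_bytes)
{
    int frame_size;

    if (xn->queued_packets >= MUX_MAX_PACKETS)
        return 1;

    frame_size = sizeof(struct mux_first_header)
        + xn->queued_bytes
        + 16 + new_packet_bytes    /* assumed: folded in on elided lines */
        + sizeof(struct mux_next_header) + 4
        + sizeof(struct mux_bounds) * xn->queued_packets;

    return frame_size > xn->frame.max_size;
}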
1377 struct xmm_net *xn = container_of(t, struct xmm_net, deadline);
1379 spin_lock_irqsave(&xn->lock, flags);
1380 if (!skb_queue_empty(&xn->queue) && xmm7360_qp_can_write(xn->qp))
1381 xmm7360_net_flush(xn);
1382 spin_unlock_irqrestore(&xn->lock, flags);
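
The deadline timer (1377-1382) is the other flush trigger: if packets are still queued when it fires and the ring can take a frame, flush immediately. A near-complete reconstruction; only the return value is assumed:

static enum hrtimer_restart xmm7360_net_deadline_cb(struct hrtimer *t)
{
    struct xmm_net *xn = container_of(t, struct xmm_net, deadline);
    unsigned long flags;

    spin_lock_irqsave(&xn->lock, flags);
    if (!skb_queue_empty(&xn->queue) && xmm7360_qp_can_write(xn->qp))
        xmm7360_net_flush(xn);
    spin_unlock_irqrestore(&xn->lock, flags);

    return HRTIMER_NORESTART;    /* assumed: xmit re-arms as needed */
}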
1388 struct xmm_net *xn = netdev_priv(dev);
1397 spin_lock_irqsave(&xn->lock, flags);
1398 if (xmm7360_net_must_flush(xn, skb->len)) {
1399 if (xmm7360_qp_can_write(xn->qp)) {
1400 xmm7360_net_flush(xn);
1403 spin_unlock_irqrestore(&xn->lock, flags);
1408 xn->queued_packets++;
1409 xn->queued_bytes += 16 + skb->len;
1410 skb_queue_tail(&xn->queue, skb);
1412 spin_unlock_irqrestore(&xn->lock, flags);
1414 if (!hrtimer_active(&xn->deadline)) {
1416 hrtimer_start(&xn->deadline, kt, HRTIMER_MODE_REL);
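
The transmit path at 1388-1416 ties it together: flush first if this skb would overflow the pending frame, back-pressure the stack when the ring is full, otherwise queue the skb and arm the deadline timer. Lines 1389-1396 and 1401-1407 are elided, so the NETDEV_TX_* returns, the netif_stop_queue() fallback, and the timeout value are assumptions in this sketch:

static netdev_tx_t xmm7360_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct xmm_net *xn = netdev_priv(dev);
    unsigned long flags;
    ktime_t kt;

    spin_lock_irqsave(&xn->lock, flags);

    if (xmm7360_net_must_flush(xn, skb->len)) {
        if (xmm7360_qp_can_write(xn->qp)) {
            xmm7360_net_flush(xn);
        } else {
            /* Ring full: stop TX until txwake reopens the queue. */
            netif_stop_queue(dev);
            spin_unlock_irqrestore(&xn->lock, flags);
            return NETDEV_TX_BUSY;
        }
    }

    xn->queued_packets++;
    xn->queued_bytes += 16 + skb->len;    /* 16: per-packet mux overhead */
    skb_queue_tail(&xn->queue, skb);

    spin_unlock_irqrestore(&xn->lock, flags);

    if (!hrtimer_active(&xn->deadline)) {
        kt = ktime_set(0, 100000);    /* assumed ~100 us coalescing window */
        hrtimer_start(&xn->deadline, kt, HRTIMER_MODE_REL);
    }

    return NETDEV_TX_OK;
}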
1450 static void xmm7360_os_handle_net_dequeue(struct xmm_net *xn, struct mux_frame *frame)
1455 while ((skb = skb_dequeue(&xn->queue))) {
1466 static void xmm7360_os_handle_net_txwake(struct xmm_net *xn)
1468 BUG_ON(!xmm7360_qp_can_write(xn->qp));
1470 if (netif_queue_stopped(xn->xmm->netdev))
1471 netif_wake_queue(xn->xmm->netdev);
1483 struct xmm_net *xn = netdev_priv(dev);
1484 spin_lock_init(&xn->lock);
1485 hrtimer_init(&xn->deadline, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1486 xn->deadline.function = xmm7360_net_deadline_cb;
1487 skb_queue_head_init(&xn->queue);
1506 struct xmm_net *xn;
1518 xn = netdev_priv(netdev);
1519 xn->xmm = xmm;
1520 xmm->net = xn;
1526 xn->qp = xmm7360_init_qp(xmm, num, 128, TD_MAX_PAGE_SIZE);
1529 ret = xmm7360_qp_start(xn->qp);
1534 xmm7360_qp_stop(xn->qp);
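
Bring-up on the Linux side (1506-1534) pairs the netdev's private area with the xmm device, opens queue pair num with 128 descriptors of one TD_MAX_PAGE_SIZE page each, and unwinds with xmm7360_qp_stop() on failure. The allocation and registration calls sit on elided lines, so the sketch names them by the standard kernel API and flags the function and setup-callback names as assumed:

static int xmm7360_create_net(struct xmm_dev *xmm, int num)    /* name assumed */
{
    struct net_device *netdev;
    struct xmm_net *xn;
    int ret;

    netdev = alloc_netdev(sizeof(*xn), "wwan%d", NET_NAME_UNKNOWN,
                          xmm7360_net_setup);    /* setup callback assumed */
    if (!netdev)
        return -ENOMEM;

    xn = netdev_priv(netdev);
    xn->xmm = xmm;
    xmm->net = xn;

    /* Queue pair `num` carries all mux traffic: 128 descriptors,
     * each one TD_MAX_PAGE_SIZE page. */
    xn->qp = xmm7360_init_qp(xmm, num, 128, TD_MAX_PAGE_SIZE);

    ret = xmm7360_qp_start(xn->qp);
    if (ret)
        goto err_free;

    ret = register_netdev(netdev);
    if (ret)
        goto err_stop;

    return 0;

err_stop:
    xmm7360_qp_stop(xn->qp);    /* matches the unwind on line 1534 */
err_free:
    free_netdev(netdev);
    return ret;
}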
2960 xmm7360_os_handle_net_dequeue(struct xmm_net *xn, struct mux_frame *frame)
2963 container_of(xn, struct wwan_softc, sc_xmm_net);
2968 MUTEX_ASSERT_LOCKED(&xn->lock);
3001 static void xmm7360_os_handle_net_txwake(struct xmm_net *xn)
3004 container_of(xn, struct wwan_softc, sc_xmm_net);
3007 MUTEX_ASSERT_LOCKED(&xn->lock);
3009 KASSERT(xmm7360_qp_can_write(xn->qp));
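
The BSD txwake (3001-3009) mirrors the Linux one but reaches the softc via container_of() and asserts the caller already holds xn->lock. A sketch; the OACTIVE handling and the sc_ifnet field name are assumptions:

static void xmm7360_os_handle_net_txwake(struct xmm_net *xn)
{
    struct wwan_softc *sc_if =
        container_of(xn, struct wwan_softc, sc_xmm_net);
    struct ifnet *ifp = &sc_if->sc_ifnet;    /* field name assumed */

    MUTEX_ASSERT_LOCKED(&xn->lock);
    KASSERT(xmm7360_qp_can_write(xn->qp));

    /* Let queued mbufs flow again now that the ring has room. */
    if (ifq_is_oactive(&ifp->if_snd))
        ifq_clr_oactive(&ifp->if_snd);
    if_start(ifp);
}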
3227 struct xmm_net *xn;
3232 xn = &sc_if->sc_xmm_net;
3233 mutex_init(&xn->lock);
3236 xn->qp = &xmm->qp[0];
3237 xmm7360_qp_start(xn->qp);
3238 xmm->net = xn;
3248 IFQ_SET_MAXLEN(&ifp->if_snd, xn->qp->depth);
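
Attach on the BSD side (3227-3248) embeds xn in the softc instead of netdev_priv(), reuses hardware queue pair 0, and caps the software send queue at the ring depth so the stack can never out-queue the hardware. A sketch with an assumed function signature:

static void xmm7360_mux_attach(struct wwan_softc *sc_if, struct xmm_dev *xmm)
{
    /* Signature assumed; shown only to frame the matched lines. */
    struct xmm_net *xn = &sc_if->sc_xmm_net;
    struct ifnet *ifp = &sc_if->sc_ifnet;    /* field name assumed */

    mutex_init(&xn->lock);    /* driver-local compat wrapper */

    /* Unlike the Linux path, the BSD port reuses hardware queue
     * pair 0 rather than allocating a fresh one. */
    xn->qp = &xmm->qp[0];
    xmm7360_qp_start(xn->qp);
    xmm->net = xn;

    /* Cap the software send queue at the TD ring depth so the stack
     * can never queue more than the hardware will accept. */
    IFQ_SET_MAXLEN(&ifp->if_snd, xn->qp->depth);
}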