/*	$NetBSD: cpu_in_cksum.c,v 1.1.2.2 2008/02/04 09:24:39 yamt Exp $	*/
/*-
 * Copyright (c) 2008 Joerg Sonnenberger <joerg (at) NetBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu_in_cksum.c,v 1.1.2.2 2008/02/04 09:24:39 yamt Exp $");

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#ifdef _KERNEL
#include <sys/systm.h>
#else
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define KASSERT(x) assert(x)
#endif

#include <machine/limits.h>

#include <netinet/in.h>

#ifndef _KERNEL
int	cpu_in_cksum(struct mbuf*, int, int, uint32_t);
#endif
/*
 * Checksum routine for Internet Protocol family headers (Portable Version).
 *
 * This routine is very heavily used in the network code and should be
 * modified for each CPU to be as fast as possible.
 *
 * A discussion of different implementation techniques can be found in
 * RFC 1071.
 *
 * The default implementation for 32bit architectures uses a 32bit
 * accumulator and operates on 16bit operands.
 *
 * The default implementation for 64bit architectures uses a 64bit
 * accumulator and operates on 32bit operands.
 *
 * Both versions are unrolled to handle 32 byte / 64 byte fragments as the
 * core of the inner loop. After each iteration of the inner loop, a partial
 * reduction is done to avoid carry overflow in long packets.
 */
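
/*
 * For orientation, a minimal, unoptimised form of the RFC 1071 checksum
 * over a flat, 16bit aligned buffer is sketched below. It shows the
 * basic sum-and-fold structure that the unrolled versions in this file
 * accelerate. The sketch is illustrative only and is not compiled; the
 * function name is hypothetical.
 */
#if 0
static uint16_t
in_cksum_simple(const uint8_t *buf, size_t len, uint32_t sum)
{
	/* Add the data as a sequence of 16bit words. */
	while (len > 1) {
		sum += *(const uint16_t *)buf;
		buf += 2;
		len -= 2;
	}
	/* An odd trailing byte is treated as padded with a zero byte. */
	if (len == 1) {
#if _BYTE_ORDER == _LITTLE_ENDIAN
		sum += *buf;
#else
		sum += (uint32_t)*buf << 8;
#endif
	}
	/* Fold the 32bit accumulator down to 16 bits and complement. */
	while (sum >> 16)
		sum = (sum >> 16) + (sum & 0xffff);
	return ~sum & 0xffff;
}
#endif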

#if ULONG_MAX == 0xffffffffUL
/* 32bit version */
int
cpu_in_cksum(struct mbuf *m, int len, int off, uint32_t initial_sum)
{
	int mlen;
	uint32_t sum, partial;
	unsigned int final_acc;
	uint8_t *data;
	bool needs_swap, started_on_odd;

	KASSERT(len >= 0);
	KASSERT(off >= 0);

	needs_swap = false;
	started_on_odd = false;
	sum = (initial_sum >> 16) + (initial_sum & 0xffff);

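	/*
	 * Skip mbufs until the requested initial offset has been
	 * consumed, then enter the main loop at post_initial_offset
	 * inside the mbuf holding the first byte of interest.
	 */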
	for (;;) {
		if (__predict_false(m == NULL)) {
			printf("in_cksum: out of data\n");
			return -1;
		}
		mlen = m->m_len;
		if (mlen > off) {
			mlen -= off;
			data = mtod(m, uint8_t *) + off;
			goto post_initial_offset;
		}
		off -= mlen;
		if (len == 0)
			break;
		m = m->m_next;
	}

	for (; len > 0; m = m->m_next) {
		if (__predict_false(m == NULL)) {
			printf("in_cksum: out of data\n");
			return -1;
		}
		mlen = m->m_len;
		data = mtod(m, uint8_t *);
 post_initial_offset:
		if (mlen == 0)
			continue;
		if (mlen > len)
			mlen = len;
		len -= mlen;

		partial = 0;
		if ((uintptr_t)data & 1) {
			/* Align on word boundary */
			started_on_odd = !started_on_odd;
#if _BYTE_ORDER == _LITTLE_ENDIAN
			partial = *data << 8;
#else
			partial = *data;
#endif
			++data;
			--mlen;
		}
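		/*
		 * If this chunk starts at an odd offset within the
		 * overall data stream, every 16bit word it contributes
		 * is shifted by one byte relative to the checksum so
		 * far, so its partial sum must be byte swapped before
		 * being merged into the accumulator (see below).
		 */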
		needs_swap = started_on_odd;
		while (mlen >= 32) {
			__builtin_prefetch(data + 32);
			partial += *(uint16_t *)data;
			partial += *(uint16_t *)(data + 2);
			partial += *(uint16_t *)(data + 4);
			partial += *(uint16_t *)(data + 6);
			partial += *(uint16_t *)(data + 8);
			partial += *(uint16_t *)(data + 10);
			partial += *(uint16_t *)(data + 12);
			partial += *(uint16_t *)(data + 14);
			partial += *(uint16_t *)(data + 16);
			partial += *(uint16_t *)(data + 18);
			partial += *(uint16_t *)(data + 20);
			partial += *(uint16_t *)(data + 22);
			partial += *(uint16_t *)(data + 24);
			partial += *(uint16_t *)(data + 26);
			partial += *(uint16_t *)(data + 28);
			partial += *(uint16_t *)(data + 30);
			data += 32;
			mlen -= 32;
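			/*
			 * partial grows by at most 16 * 0xffff per
			 * iteration; once its top bits are set, fold
			 * it into sum before it can wrap around.
			 */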
			if (__predict_false(partial & 0xc0000000)) {
				if (needs_swap)
					partial = (partial << 8) + (partial >> 24);
				sum += (partial >> 16);
				sum += (partial & 0xffff);
				partial = 0;
			}
		}
		if (mlen & 16) {
			partial += *(uint16_t *)data;
			partial += *(uint16_t *)(data + 2);
			partial += *(uint16_t *)(data + 4);
			partial += *(uint16_t *)(data + 6);
			partial += *(uint16_t *)(data + 8);
			partial += *(uint16_t *)(data + 10);
			partial += *(uint16_t *)(data + 12);
			partial += *(uint16_t *)(data + 14);
			data += 16;
			mlen -= 16;
		}
		/*
		 * mlen is not updated below, as the remaining tests
		 * use bit masks that the omitted updates would not
		 * affect.
		 */
		if (mlen & 8) {
			partial += *(uint16_t *)data;
			partial += *(uint16_t *)(data + 2);
			partial += *(uint16_t *)(data + 4);
			partial += *(uint16_t *)(data + 6);
			data += 8;
		}
		if (mlen & 4) {
			partial += *(uint16_t *)data;
			partial += *(uint16_t *)(data + 2);
			data += 4;
		}
		if (mlen & 2) {
			partial += *(uint16_t *)data;
			data += 2;
		}
		if (mlen & 1) {
#if _BYTE_ORDER == _LITTLE_ENDIAN
			partial += *data;
#else
			partial += *data << 8;
#endif
			started_on_odd = !started_on_odd;
		}

		if (needs_swap)
			partial = (partial << 8) + (partial >> 24);
		sum += (partial >> 16) + (partial & 0xffff);
		/*
		 * Reduce sum to allow a potential byte swap in the
		 * next iteration without carry.
		 */
		sum = (sum >> 16) + (sum & 0xffff);
	}
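	/*
	 * sum has already been reduced to at most 17 significant bits
	 * by the loop above, so two folding steps suffice to bring the
	 * result down to 16 bits before complementing it.
	 */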
	final_acc = ((sum >> 16) & 0xffff) + (sum & 0xffff);
	final_acc = (final_acc >> 16) + (final_acc & 0xffff);
	return ~final_acc & 0xffff;
}

#else
/* 64bit version */
int
cpu_in_cksum(struct mbuf *m, int len, int off, uint32_t initial_sum)
{
	int mlen;
	uint64_t sum, partial;
	unsigned int final_acc;
	uint8_t *data;
	bool needs_swap, started_on_odd;

	KASSERT(len >= 0);
	KASSERT(off >= 0);

	needs_swap = false;
	started_on_odd = false;
	sum = initial_sum;

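	/*
	 * As in the 32bit version: skip mbufs until the initial offset
	 * has been consumed, then enter the main loop at
	 * post_initial_offset.
	 */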
	for (;;) {
		if (__predict_false(m == NULL)) {
			printf("in_cksum: out of data\n");
			return -1;
		}
		mlen = m->m_len;
		if (mlen > off) {
			mlen -= off;
			data = mtod(m, uint8_t *) + off;
			goto post_initial_offset;
		}
		off -= mlen;
		if (len == 0)
			break;
		m = m->m_next;
	}

	for (; len > 0; m = m->m_next) {
		if (__predict_false(m == NULL)) {
			printf("in_cksum: out of data\n");
			return -1;
		}
		mlen = m->m_len;
		data = mtod(m, uint8_t *);
 post_initial_offset:
		if (mlen == 0)
			continue;
		if (mlen > len)
			mlen = len;
		len -= mlen;

		partial = 0;
		if ((uintptr_t)data & 1) {
			/* Align on word boundary */
			started_on_odd = !started_on_odd;
#if _BYTE_ORDER == _LITTLE_ENDIAN
			partial = *data << 8;
#else
			partial = *data;
#endif
			++data;
			--mlen;
		}
		needs_swap = started_on_odd;
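		/*
		 * The 64bit version loads 32bit words, so additionally
		 * align the pointer to a 4 byte boundary; chunks too
		 * short for that are handled as trailing bytes.
		 */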
		if ((uintptr_t)data & 2) {
			if (mlen < 2)
				goto trailing_bytes;
			partial += *(uint16_t *)data;
			data += 2;
			mlen -= 2;
		}
		while (mlen >= 64) {
			__builtin_prefetch(data + 32);
			__builtin_prefetch(data + 64);
			partial += *(uint32_t *)data;
			partial += *(uint32_t *)(data + 4);
			partial += *(uint32_t *)(data + 8);
			partial += *(uint32_t *)(data + 12);
			partial += *(uint32_t *)(data + 16);
			partial += *(uint32_t *)(data + 20);
			partial += *(uint32_t *)(data + 24);
			partial += *(uint32_t *)(data + 28);
			partial += *(uint32_t *)(data + 32);
			partial += *(uint32_t *)(data + 36);
			partial += *(uint32_t *)(data + 40);
			partial += *(uint32_t *)(data + 44);
			partial += *(uint32_t *)(data + 48);
			partial += *(uint32_t *)(data + 52);
			partial += *(uint32_t *)(data + 56);
			partial += *(uint32_t *)(data + 60);
			data += 64;
			mlen -= 64;
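			/*
			 * As in the 32bit version: fold partial into
			 * sum once its top bits are set, before it can
			 * wrap on the next run of additions.
			 */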
			if (__predict_false(partial & (3ULL << 62))) {
				if (needs_swap)
					partial = (partial << 8) + (partial >> 56);
				sum += (partial >> 32);
				sum += (partial & 0xffffffff);
				partial = 0;
			}
		}
		/*
		 * mlen is not updated below, as the remaining tests
		 * use bit masks that the omitted updates would not
		 * affect.
		 */
		if (mlen & 32) {
			partial += *(uint32_t *)data;
			partial += *(uint32_t *)(data + 4);
			partial += *(uint32_t *)(data + 8);
			partial += *(uint32_t *)(data + 12);
			partial += *(uint32_t *)(data + 16);
			partial += *(uint32_t *)(data + 20);
			partial += *(uint32_t *)(data + 24);
			partial += *(uint32_t *)(data + 28);
			data += 32;
		}
		if (mlen & 16) {
			partial += *(uint32_t *)data;
			partial += *(uint32_t *)(data + 4);
			partial += *(uint32_t *)(data + 8);
			partial += *(uint32_t *)(data + 12);
			data += 16;
		}
		if (mlen & 8) {
			partial += *(uint32_t *)data;
			partial += *(uint32_t *)(data + 4);
			data += 8;
		}
		if (mlen & 4) {
			partial += *(uint32_t *)data;
			data += 4;
		}
		if (mlen & 2) {
			partial += *(uint16_t *)data;
			data += 2;
		}
 trailing_bytes:
		if (mlen & 1) {
#if _BYTE_ORDER == _LITTLE_ENDIAN
			partial += *data;
#else
			partial += *data << 8;
#endif
			started_on_odd = !started_on_odd;
		}

		if (needs_swap)
			partial = (partial << 8) + (partial >> 56);
		sum += (partial >> 32) + (partial & 0xffffffff);
		/*
		 * Reduce sum to allow a potential byte swap in the
		 * next iteration without carry.
		 */
		sum = (sum >> 32) + (sum & 0xffffffff);
	}
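	/*
	 * The final addition can leave more than 32 significant bits
	 * in sum, so fold all four 16bit halves before the last two
	 * reduction steps.
	 */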
	final_acc = (sum >> 48) + ((sum >> 32) & 0xffff) +
	    ((sum >> 16) & 0xffff) + (sum & 0xffff);
	final_acc = (final_acc >> 16) + (final_acc & 0xffff);
	final_acc = (final_acc >> 16) + (final_acc & 0xffff);
	return ~final_acc & 0xffff;
}
#endif
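
#if 0
/*
 * A minimal userland smoke test might look like the sketch below
 * (illustrative only, hence not compiled). It assumes the file is
 * built without _KERNEL, that <string.h> is available for memset(),
 * and that a stack-allocated struct mbuf can be faked well enough for
 * the m_len, m_next and mtod() accesses above; the buffer contents
 * are made up for the example.
 */
int
main(void)
{
	static uint8_t buf[8] = {
		0x00, 0x01, 0xf2, 0x03, 0xf4, 0xf5, 0xf6, 0xf7
	};
	struct mbuf m;

	memset(&m, 0, sizeof(m));
	m.m_data = (char *)buf;
	m.m_len = sizeof(buf);
	m.m_next = NULL;

	/* Checksum all 8 bytes starting at offset 0, no initial sum. */
	printf("cksum: 0x%04x\n", cpu_in_cksum(&m, sizeof(buf), 0, 0));
	return 0;
}
#endif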