/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>
#include "assym.h"

RCSID("$NetBSD: spe_subr.S,v 1.3.2.1 2014/08/20 00:03:19 tls Exp $")

	.text
#ifndef __clang__
	.machine	e500x2
#endif
/*
 * Lintstub: void vec_load_from_vreg(const struct vreg *);
 */
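/*
 * Layout note (a hedged sketch, not the authoritative definition): the
 * (n << 2) and (8 << 4) displacements used below are consistent with a
 * save area shaped roughly like the hypothetical C below, i.e. the 32-bit
 * high halves of r0..r31 packed as 32 words, followed by the 64-bit
 * accumulator at byte offset 128.  The real layout is whatever struct
 * vreg says in the machine-dependent headers.
 *
 *	struct vreg_sketch {		// hypothetical, for illustration
 *		uint32_t gpr_hi[32];	// (n << 2)(%r3): high half of rN
 *		uint64_t acc;		// (8 << 4)(%r3): accumulator
 *	};
 */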
ENTRY(vec_load_from_vreg)
	/*
	 * Load and initialize the accumulator.  Don't care about trashing
	 * r0's high half since we are about to load it.
	 */
	evldd %r0,(8 << 4)(%r3)
	evmra %r0,%r0

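	/*
	 * A hedged C-level reading of the two instructions above, using
	 * the hypothetical vreg_sketch fields from the layout note (ACC
	 * is the SPE accumulator):
	 *
	 *	r0  = vreg->acc;	// evldd from offset (8 << 4)
	 *	ACC = r0;		// evmra (also writes r0 = r0)
	 */
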
	/*
	 * SPEFSCR is in the trapframe; it is restored on exception return,
	 * not here.
	 */

	/*
	 * Since the high parts of the registers are saved in the vreg
	 * (that's what we are loading), and we know SPE is enabled (we
	 * need it for these loads), just load two high parts at a time
	 * and merge appropriately.  For each even-odd register pair, the
	 * evldw loads the high part of the even register into the high
	 * 32 bits of r0 and the high part of the odd register into the
	 * low 32 bits.
	 *
	 * The evmergelo then moves the low half of r0 into the high half
	 * of the destination register, and evmergehilo merges the high
	 * half of r0 with the low half of the other register.
	 */
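	/*
	 * As a sketch of one pair (pseudo-C; hi()/lo() are hypothetical
	 * helpers naming the 32-bit halves of a 64-bit SPE register):
	 *
	 *	r0 = *(uint64_t *)&vreg->gpr_hi[30];	// evldw:
	 *						//  hi(r0) = gpr_hi[30]
	 *						//  lo(r0) = gpr_hi[31]
	 *	hi(r31) = lo(r0);	// evmergelo, lo(r31) untouched
	 *	hi(r30) = hi(r0);	// evmergehilo, lo(r30) untouched
	 */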
	evldw %r0,(30 << 2)(%r3)
		evmergelo %r31,%r0,%r31; evmergehilo %r30,%r0,%r30
	evldw %r0,(28 << 2)(%r3)
		evmergelo %r29,%r0,%r29; evmergehilo %r28,%r0,%r28
	evldw %r0,(26 << 2)(%r3)
		evmergelo %r27,%r0,%r27; evmergehilo %r26,%r0,%r26
	evldw %r0,(24 << 2)(%r3)
		evmergelo %r25,%r0,%r25; evmergehilo %r24,%r0,%r24
	evldw %r0,(22 << 2)(%r3)
		evmergelo %r23,%r0,%r23; evmergehilo %r22,%r0,%r22
	evldw %r0,(20 << 2)(%r3)
		evmergelo %r21,%r0,%r21; evmergehilo %r20,%r0,%r20
	evldw %r0,(18 << 2)(%r3)
		evmergelo %r19,%r0,%r19; evmergehilo %r18,%r0,%r18
	evldw %r0,(16 << 2)(%r3)
		evmergelo %r17,%r0,%r17; evmergehilo %r16,%r0,%r16
	evldw %r0,(14 << 2)(%r3)
		evmergelo %r15,%r0,%r15; evmergehilo %r14,%r0,%r14
	evldw %r0,(12 << 2)(%r3)
		evmergelo %r13,%r0,%r13; evmergehilo %r12,%r0,%r12
	/*
	 * Done with the callee-saved registers.  For the caller-saved
	 * registers we can simply trash their current contents, so load
	 * the two high-half words directly into the even register and
	 * merge the odd register's half into it.
	 */
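	/*
	 * Sketch of one caller-saved pair (same hypothetical hi()/lo()
	 * notation as above; the stale low halves are simply left behind):
	 *
	 *	r10 = *(uint64_t *)&vreg->gpr_hi[10];	// hi(r10) = gpr_hi[10]
	 *						// lo(r10) = gpr_hi[11]
	 *	hi(r11) = lo(r10);			// evmergelo
	 */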
	evldw %r10,(10 << 2)(%r3)
		evmergelo %r11,%r10,%r11
	evldw %r8,(8 << 2)(%r3)
		evmergelo %r9,%r8,%r9
	evldw %r6,(6 << 2)(%r3)
		evmergelo %r7,%r6,%r7
	evldw %r4,(4 << 2)(%r3)
		evmergelo %r5,%r4,%r5

	/*
	 * r2 isn't caller-saved, so load via r0 because we also still
	 * need r3 intact.
	 */
	evldw %r0,(2 << 2)(%r3)
		evmergelo %r3,%r0,%r3; evmergehilo %r2,%r0,%r2
	evldd %r0,(0 << 2)(%r3)
		evmergelo %r1,%r0,%r1		/* why bother? */

	blr
END(vec_load_from_vreg)

/*
 * Lintstub: void vec_unload_to_vreg(struct vreg *);
 */
ENTRY(vec_unload_to_vreg)
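	/*
	 * This is the mirror image of vec_load_from_vreg: pack the high
	 * halves of each even/odd register pair into r0 and store both
	 * with a single evstdw.  Roughly, in the same hypothetical
	 * hi()/lo() notation as above:
	 *
	 *	hi(r0) = hi(rN); lo(r0) = hi(rN+1);	// evmergehi
	 *	*(uint64_t *)&vreg->gpr_hi[N] = r0;	// evstdw
	 *
	 * r0's own high half is parked in lo(r4) first (only r4's low
	 * half is clobbered, and r4 is volatile) so it can be put back
	 * at the end.
	 */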
	evmergehi %r4,%r4,%r0;	/* save r0's high part in lo r4 */
	evmergehi %r0,%r0,%r1; evstdw %r0,(0 << 2)(%r3)
	evmergehi %r0,%r2,%r3; evstdw %r0,(2 << 2)(%r3)
	evmergehi %r0,%r4,%r5; evstdw %r0,(4 << 2)(%r3)
	evmergehi %r0,%r6,%r7; evstdw %r0,(6 << 2)(%r3)
	evmergehi %r0,%r8,%r9; evstdw %r0,(8 << 2)(%r3)
	evmergehi %r0,%r10,%r11; evstdw %r0,(10 << 2)(%r3)
	evmergehi %r0,%r12,%r13; evstdw %r0,(12 << 2)(%r3)
	evmergehi %r0,%r14,%r15; evstdw %r0,(14 << 2)(%r3)
	evmergehi %r0,%r16,%r17; evstdw %r0,(16 << 2)(%r3)
	evmergehi %r0,%r18,%r19; evstdw %r0,(18 << 2)(%r3)
	evmergehi %r0,%r20,%r21; evstdw %r0,(20 << 2)(%r3)
	evmergehi %r0,%r22,%r23; evstdw %r0,(22 << 2)(%r3)
	evmergehi %r0,%r24,%r25; evstdw %r0,(24 << 2)(%r3)
	evmergehi %r0,%r26,%r27; evstdw %r0,(26 << 2)(%r3)
	evmergehi %r0,%r28,%r29; evstdw %r0,(28 << 2)(%r3)
	evmergehi %r0,%r30,%r31; evstdw %r0,(30 << 2)(%r3)

	/*
	 * Now save the accumulator.
	 */
	evxor %r0,%r0,%r0		/* zero r0 */
	evaddumiaaw %r0,%r0		/* r0 = accum + r0 */
	evstdd %r0,(8 << 4)(%r3)	/* store it */
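	/*
	 * SPE has no direct move-from-accumulator, so the read-out above
	 * uses the usual add-zero identity; roughly:
	 *
	 *	r0 = 0;			// evxor r0,r0,r0
	 *	r0 = ACC + 0;		// evaddumiaaw: per-word add,
	 *				// also rewrites ACC = r0
	 *	vreg->acc = r0;		// evstdd to offset (8 << 4)
	 */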
    140      1.2  matt 
    141      1.2  matt 	evmergelo %r0,%r4,%r0		/* retore r0's high half */
    142      1.2  matt 
    143      1.2  matt 	/*
    144      1.2  matt 	 * The SPEFSCR will be restored when the exception returns.
    145      1.2  matt 	 */
    146      1.2  matt 	blr
    147      1.2  matt END(vec_unload_to_vreg)
    148