Home | History | Annotate | Line # | Download | only in epiphany
      1 /* Signed 32 bit division optimized for Epiphany.
      2    Copyright (C) 2009-2024 Free Software Foundation, Inc.
      3    Contributed by Embecosm on behalf of Adapteva, Inc.
      4 
      5 This file is part of GCC.
      6 
      7 This file is free software; you can redistribute it and/or modify it
      8 under the terms of the GNU General Public License as published by the
      9 Free Software Foundation; either version 3, or (at your option) any
     10 later version.
     11 
     12 This file is distributed in the hope that it will be useful, but
     13 WITHOUT ANY WARRANTY; without even the implied warranty of
     14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     15 General Public License for more details.
     16 
     17 Under Section 7 of GPL version 3, you are granted additional
     18 permissions described in the GCC Runtime Library Exception, version
     19 3.1, as published by the Free Software Foundation.
     20 
     21 You should have received a copy of the GNU General Public License and
     22 a copy of the GCC Runtime Library Exception along with this program;
     23 see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
     24 <http://www.gnu.org/licenses/>.  */
     25 
     26 #include "epiphany-asm.h"
     27 
      28 	FSTAB (__divsi3,T_INT)
      29 	.global SYM(__divsi3)
      30 	.balign 4
      31 	HIDDEN_FUNC(__divsi3)

/* int __divsi3 (int dividend, int divisor)

   In:       r0 = dividend, r1 = divisor.
   Out:      r0 = quotient, truncated toward zero.
   Clobbers: r1-r3, r12, r16-r21, integer and FP flags.

   Method: work on absolute values.  Recover the index of the most
   significant set bit of each operand with an FPU trick: splice the
   integer into the mantissa of a known float constant, fsub that
   constant away, and read the (biased) msb index out of the exponent
   field of the result.  The difference n of the two indices is the
   number of quotient bits still to produce.  Bits n and n-1 are
   computed inline; the remaining n-1 low bits come from jumping a
   computed distance into a 30-step unrolled shift/subtract loop.
   The quotient sign is fixed up last.
   NOTE(review): divisor == 0 takes the normal path and yields an
   unspecified quotient, as is customary for libgcc helpers.
   NOTE(review): an extra space before some mnemonics below appears to
   mark FPU/IALU dual-issue scheduling -- preserved exactly as-is.  */
      32 SYM(__divsi3):
      33 	mov r12,0
      34 	sub r2,r12,r0		; r2 = -dividend (sets integer flags)
      35 	movlt r2,r0		; r2 = |dividend|
      36 	sub r3,r12,r1
      37 	movlt r3,r1		; r3 = |divisor|
      38 	sub r19,r2,r3
      39 	bltu .Lret0		; |dividend| < |divisor| -> quotient is 0
; msb extraction, small-operand path (valid for x < 2^23, where the OR
; below cannot disturb the exponent bits): (0x40000000 | x) is the float
; 2.0 + x*2^-22; subtracting 2.0f renormalizes, leaving a float whose
; exponent field is msb(x) + 105.
      40 	movt r12,0x4000		; r12 = 0x40000000 = 2.0f
      41 	orr r16,r2,r12		; splice |dividend| into 2.0f's mantissa
      42 	orr r18,r3,r12		; splice |divisor| likewise
      43 	 fsub r16,r16,r12	; exponent(r16) = msb(|dividend|) + 105
      44 	fsub r18,r18,r12	; exponent(r18) = msb(|divisor|) + 105
; msb extraction, large-operand path (x >= 2^23): convert x>>23 instead,
; spliced into 2^24: (0x4b800000 | (x>>23)) - 2^24 = (x>>23) * 2, whose
; exponent field is again msb(x) + 105 -- consistent with the small path,
; so exponent differences can be mixed across paths.
      45 	 movt r12,0x4b80	; r12 = 0x4b800000 = 2^24 as float
      46 	lsr r19,r3,23		; r19 = |divisor| >> 23
      47 	lsr r17,r2,23		; r17 = |dividend| >> 23 (flags tested below)
      48 	movt r17,0x4b80
      49 	fsub r17,r17,r12	; large-path float for |dividend|
      50 	 movt r19,0x4b80
      51 	fsub r19,r19,r12	; large-path float for |divisor|
      52 	 mov r12,%low(.L0step)
      53 	movt r12,%high(.L0step)	; r12 = &.L0step, base of the computed jump
      54 	mov r20,0		; r20 = quotient accumulator
      55 	mov r21,1
; NOTE(review): per the Epiphany ARM, mov/movt-immediate and fsub do not
; update the integer flags, so each movne below still sees the result of
; the corresponding lsr above.
      56 	movne r16,r17		; |dividend| >= 2^23: take large-path float
      57 	lsr r17,r3,23		; redo |divisor| >> 23 for flags (r19 was reused)
      58 	movne r18,r19		; |divisor| >= 2^23: take large-path float
      59 	eor r1,r1,r0 ; save sign (quotient sign = sign bit of dividend^divisor)
      60 	asr r19,r1,31		; r19 = 0 if quotient >= 0, -1 if negative
      61 	lsr r1,r16,23		; biased msb index of |dividend|
      62 	lsr r0,r18,23		; biased msb index of |divisor|
      63 	sub r1,r1,r0 ; calculate bit number difference.  n = msb(|dividend|) - msb(|divisor|)
      64 	lsl r3,r3,r1		; r3 = |divisor| << n, aligned under dividend's msb
      65 	lsr r16,r3,1		; r16 = (|divisor| << n) >> 1
      66 	lsl r0,r21,r1		; r0 = 1 << n, value of the first quotient bit
      67 	lsl r1,r1,3		; NOTE(review): assumes 8 bytes of code per step
      68 	sub r12,r12,r1		; jump target = .L0step - 8*n
; quotient bit n:
      69 	sub r3,r2,r3		; trial-subtract divisor << n
      70 	movgteu r2,r3		; no borrow: commit new remainder ...
      71 	movgteu r20,r0		; ... and record quotient bit n
; quotient bit n-1:
      72 	lsr r0,r0,1		; r0 = 1 << (n-1)
      73 	add r17,r0,r20		; quotient value if this bit is also set
      74 	sub r3,r2,r16		; trial-subtract divisor << (n-1)
      75 	movgteu r2,r3
      76 	movgteu r20,r17		; record quotient bit n-1
      77 	sub r16,r16,1		; bias divisor by -1: after each left shift of
				; r2 its 0 lsb becomes 1 exactly when the trial
				; subtraction succeeds, so the remaining quotient
				; bits accumulate in the low end of r2 itself
      78 	jr r12			; enter the unrolled loop so that n-1 of the 30
				; steps run (the 8-byte mask sequence after .endr
				; plays the role of the final "step" for n >= 1;
				; n == 0 lands directly on .L0step)
      79 	.rep 30
      80 	lsl r2,r2,1		; remainder <<= 1; lsb = next quotient-bit slot
      81 	sub r3,r2,r16		; trial-subtract (divisor << (n-1)) - 1
      82 	movgteu r2,r3		; success: remainder updated, lsb set to 1
      83 	.endr
      84 	sub r0,r0,1 ; mask result bits from steps ...  (1 << (n-1)) - 1
      85 	and r0,r0,r2		; keep the n-1 quotient bits gathered in r2
      86 	orr r20,r0,r20 ; ... and combine with first bit.
      87 .L0step:eor r0,r20,r19 ; restore sign
      88 	sub r0,r0,r19		; conditional negate: (q ^ mask) - mask
      89 	rts
      90 .Lret0:	mov r0,0		; |dividend| < |divisor|: quotient is 0
      91 	rts
      92 	ENDFUNC(__divsi3)
     93