chris@umcp-cs.UUCP (10/10/83)
# Block copy routines for VAXen.
# For non-VAX people, the C equivalents:
# bcopy (from, to, count) register char *from, *to; register int count; {
#	while (--count >= 0) *to++ = *from++;
# }
# cpyn (to, from, count) register char *to, *from; register int count; {
#	while (--count >= 0) *to++ = *from++;
# }
#
# bcopy (from, to, count) char *from, *to; int count;
#
# Copy "count" bytes from "from" to "to"; not guaranteed to
# work if "from" and "to" overlap.
#
# cpyn (to, from, count) char *to, *from; int count;
#
# Same as bcopy, but with the "to" and "from" arguments swapped;
# the same caveat about overlapping regions applies.
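#
# Hypothetical call sites (array name and size are made up here), just to
# show the reversed argument order of the two entry points:
#
#	char src[512], dst[512];
#	bcopy(src, dst, 512);	/* (from, to, count) */
#	cpyn(dst, src, 512);	/* (to, from, count) */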
	.align	2
	.globl	_cpyn
_cpyn:
	.word	0x3c			# entry mask: save r2-r5 (movc3 clobbers r0-r5)
	movl	4(ap),r3		# r3 = to
	movl	8(ap),r1		# r1 = from
	brb	2f

	.align	2
	.globl	_bcopy
_bcopy:
	.word	0x3c			# entry mask: save r2-r5 (movc3 clobbers r0-r5)
	movl	4(ap),r1		# r1 = from
	movl	8(ap),r3		# r3 = to
	brb	2f

1:
	subl2	r0,12(ap)		# count -= 65535 (the bytes moved this pass)
	movc3	r0,(r1),(r3)		# move 65535 bytes; movc3 leaves r1, r3 at the next chunk
2:
	movzwl	$65535,r0		# r0 = largest count movc3 accepts
	cmpl	12(ap),r0		# more than 65535 bytes left to move?
	jgtr	1b			# yes: move 65535 of them and test again
	movc3	12(ap),(r1),(r3)	# no: move the remaining (at most 65535) bytes
	ret
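
For non-VAX readers, here is a rough C rendering of the loop structure above.
It is only a sketch: the function name is made up, and memcpy merely stands
in for the movc3 instruction, whose count field is limited to 16 bits.

	#include <string.h>

	/* Copy "count" bytes in chunks of at most 65535, the largest
	 * count a single movc3 (here: memcpy) can handle. */
	void
	bcopy_sketch(char *from, char *to, int count)
	{
		while (count > 65535) {
			memcpy(to, from, 65535);	/* one full chunk */
			from += 65535;
			to += 65535;
			count -= 65535;
		}
		if (count > 0)
			memcpy(to, from, count);	/* final partial chunk */
	}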
--
In-Real-Life: Chris Torek, Univ of MD Comp Sci
UUCP: {seismo,allegra,brl-bmd}!umcp-cs!chris
CSNet:	chris@umcp-cs	ARPA:	chris.umcp-cs@UDel-Relay

dmmartindale@watcgl.UUCP (Dave Martindale) (10/13/83)
In the VAX bcopy function posted, it would probably be faster if the constant 65535 were replaced by 65532 wherever it occurs. That makes each chunk a multiple of 4 bytes long, so all memory references become longword references when the source and destination are longword-aligned.
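
In terms of the C sketch given after the assembly above (again illustrative
only; a plain memcpy call does not model the alignment behaviour of movc3,
and the names here are made up), the suggestion amounts to shrinking the
chunk to the largest multiple of 4 that still fits in movc3's 16-bit count:

	#include <string.h>

	#define CHUNK	65532	/* 65535 & ~3 */

	void
	bcopy_aligned_sketch(char *from, char *to, int count)
	{
		while (count > CHUNK) {
			memcpy(to, from, CHUNK);  /* chunk boundaries stay     */
			from += CHUNK;		  /* longword-aligned if from  */
			to += CHUNK;		  /* and to start out aligned  */
			count -= CHUNK;
		}
		if (count > 0)
			memcpy(to, from, count);
	}

Only the chunk constant changes; the loop structure is the same as before.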