Global Corpus
Collection · 6 items

| repo_id stringlengths 5–115 | size int64 590–5.01M | file_path stringlengths 4–212 | content stringlengths 590–5.01M |
|---|---|---|---|
0015/esp_rlottie
| 19,929
|
rlottie/src/vector/pixman/pixman-arm-neon-asm.S
|
/*
* Copyright © 2009 Nokia Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Siarhei Siamashka ([email protected])
*/
/*
* This file contains implementations of NEON optimized pixel processing
* functions. There is no full and detailed tutorial, but some functions
* (those which are exposing some new or interesting features) are
* extensively commented and can be used as examples.
*
* You may want to have a look at the comments for following functions:
* - pixman_composite_over_8888_0565_asm_neon
* - pixman_composite_over_n_8_0565_asm_neon
*/
/* Prevent the stack from becoming executable for no reason... */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.text
.fpu neon
.arch armv7a
.object_arch armv4
.eabi_attribute 10, 0 /* suppress Tag_FP_arch */
.eabi_attribute 12, 0 /* suppress Tag_Advanced_SIMD_arch */
.arm
.altmacro
.p2align 2
//#include "pixman-arm-asm.h"
/* Supplementary macro for setting function attributes */
.macro pixman_asm_function fname
.func fname
.global fname
#ifdef __ELF__
.hidden fname
.type fname, %function
#endif
fname:
.endm
//#include "pixman-private.h"
/*
* The defines which are shared between C and assembly code
*/
/* bilinear interpolation precision (must be < 8) */
#define BILINEAR_INTERPOLATION_BITS 7
#define BILINEAR_INTERPOLATION_RANGE (1 << BILINEAR_INTERPOLATION_BITS)
#include "pixman-arm-neon-asm.h"
/* Global configuration options and preferences */
/*
* The code can optionally make use of unaligned memory accesses to improve
* performance of handling leading/trailing pixels for each scanline.
* Configuration variable RESPECT_STRICT_ALIGNMENT can be set to 0 for
* example in linux if unaligned memory accesses are not configured to
generate exceptions.
*/
.set RESPECT_STRICT_ALIGNMENT, 1
/*
* Set default prefetch type. There is a choice between the following options:
*
* PREFETCH_TYPE_NONE (may be useful for the ARM cores where PLD is set to work
* as NOP to work around some HW bugs or for whatever other reason)
*
* PREFETCH_TYPE_SIMPLE (may be useful for simple single-issue ARM cores where
* advanced prefetch introduces heavy overhead)
*
* PREFETCH_TYPE_ADVANCED (useful for superscalar cores such as ARM Cortex-A8
* which can run ARM and NEON instructions simultaneously so that extra ARM
* instructions do not add (many) extra cycles, but improve prefetch efficiency)
*
* Note: some types of function can't support advanced prefetch and fall back
* to the simple one (those which handle 24bpp pixels)
*/
.set PREFETCH_TYPE_DEFAULT, PREFETCH_TYPE_ADVANCED
/* Prefetch distance in pixels for simple prefetch */
.set PREFETCH_DISTANCE_SIMPLE, 64
/*
* Implementation of pixman_composite_over_8888_0565_asm_neon
*
* This function takes a8r8g8b8 source buffer, r5g6b5 destination buffer and
* performs OVER compositing operation. Function fast_composite_over_8888_0565
* from pixman-fast-path.c does the same in C and can be used as a reference.
*
* First we need to have some NEON assembly code which can do the actual
* operation on the pixels and provide it to the template macro.
*
* Template macro quite conveniently takes care of emitting all the necessary
* code for memory reading and writing (including quite tricky cases of
* handling unaligned leading/trailing pixels), so we only need to deal with
* the data in NEON registers.
*
* NEON registers allocation in general is recommended to be the following:
* d0, d1, d2, d3 - contain loaded source pixel data
* d4, d5, d6, d7 - contain loaded destination pixels (if they are needed)
* d24, d25, d26, d27 - contain loaded mask pixel data (if mask is used)
* d28, d29, d30, d31 - place for storing the result (destination pixels)
*
* As can be seen above, four 64-bit NEON registers are used for keeping
* intermediate pixel data and up to 8 pixels can be processed in one step
* for 32bpp formats (16 pixels for 16bpp, 32 pixels for 8bpp).
*
* This particular function uses the following registers allocation:
* d0, d1, d2, d3 - contain loaded source pixel data
* d4, d5 - contain loaded destination pixels (they are needed)
* d28, d29 - place for storing the result (destination pixels)
*/
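/*
 * A rough C sketch of the per-pixel math that the macros below implement
 * (not the actual pixman code; the function name and the exact rounding
 * are illustrative only, and the source is assumed to use premultiplied
 * alpha, as pixman does):
 *
 *   uint16_t over_8888_0565_pixel(uint32_t s, uint16_t d)
 *   {
 *       uint32_t sa = s >> 24, sr = (s >> 16) & 0xff;
 *       uint32_t sg = (s >> 8) & 0xff, sb = s & 0xff;
 *       uint32_t dr = ((d >> 11) & 0x1f) * 255 / 31;   // expand r5g6b5
 *       uint32_t dg = ((d >> 5)  & 0x3f) * 255 / 63;   // to 8 bits
 *       uint32_t db = ( d        & 0x1f) * 255 / 31;   // per channel
 *       uint32_t ia = 255 - sa;                        // inverted alpha
 *       dr = sr + dr * ia / 255;                       // OVER, per channel
 *       dg = sg + dg * ia / 255;
 *       db = sb + db * ia / 255;
 *       return (uint16_t)(((dr >> 3) << 11) |          // repack to r5g6b5
 *                         ((dg >> 2) << 5)  |
 *                          (db >> 3));
 *   }
 */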
/*
* Step one. We need to have some code to do some arithmetics on pixel data.
* This is implemented as a pair of macros: '*_head' and '*_tail'. When used
* back-to-back, they take pixel data from {d0, d1, d2, d3} and {d4, d5},
* perform all the needed calculations and write the result to {d28, d29}.
* The rationale for having two macros and not just one will be explained
* later. In practice, any single monolithic function which does the work can
* be split into two parts in any arbitrary way without affecting correctness.
*
* There is one special trick here too. Common template macro can optionally
* make our life a bit easier by doing R, G, B, A color components
* deinterleaving for 32bpp pixel formats (and this feature is used in
* 'pixman_composite_over_8888_0565_asm_neon' function). So it means that
* instead of having 8 packed pixels in {d0, d1, d2, d3} registers, we
* actually use d0 register for blue channel (a vector of eight 8-bit
* values), d1 register for green, d2 for red and d3 for alpha. This
* simple conversion can be also done with a few NEON instructions:
*
* Packed to planar conversion:
* vuzp.8 d0, d1
* vuzp.8 d2, d3
* vuzp.8 d1, d3
* vuzp.8 d0, d2
*
* Planar to packed conversion:
* vzip.8 d0, d2
* vzip.8 d1, d3
* vzip.8 d2, d3
* vzip.8 d0, d1
*
* But pixel can be loaded directly in planar format using VLD4.8 NEON
* instruction. It is 1 cycle slower than VLD1.32, so this is not always
* desirable, that's why deinterleaving is optional.
*
* But anyway, here is the code:
*/
/*
* OK, now we got almost everything that we need. Using the above two
* macros, the work can be done right. But now we want to optimize
* it a bit. ARM Cortex-A8 is an in-order core, and benefits really
* a lot from good code scheduling and software pipelining.
*
* Let's construct some code, which will run in the core main loop.
* Some pseudo-code of the main loop will look like this:
* head
* while (...) {
* tail
* head
* }
* tail
*
* It may look a bit weird, but this setup allows us to hide instruction
* latencies better and also to utilize the dual-issue capability more
* efficiently (make pairs of load-store and ALU instructions).
*
* So what we need now is a '*_tail_head' macro, which will be used
* in the core main loop. A trivial straightforward implementation
* of this macro would look like this:
*
* pixman_composite_over_8888_0565_process_pixblock_tail
* vst1.16 {d28, d29}, [DST_W, :128]!
* vld1.16 {d4, d5}, [DST_R, :128]!
* vld4.32 {d0, d1, d2, d3}, [SRC]!
* pixman_composite_over_8888_0565_process_pixblock_head
* cache_preload 8, 8
*
* Now it also got some VLD/VST instructions. We simply can't move from
* processing one block of pixels to the other one with just arithmetics.
* The previously processed data needs to be written to memory and new
* data needs to be fetched. Fortunately, this main loop does not deal
* with partial leading/trailing pixels and can load/store a full block
* of pixels in a bulk. Additionally, destination buffer is already
* 16 bytes aligned here (which is good for performance).
*
* New things here are DST_R, DST_W, SRC and MASK identifiers. These
* are the aliases for ARM registers which are used as pointers for
* accessing data. We maintain separate pointers for reading and writing
* destination buffer (DST_R and DST_W).
*
* Another new thing is 'cache_preload' macro. It is used for prefetching
* data into CPU L2 cache and improve performance when dealing with large
* images which are far larger than cache size. It uses one argument
* (actually two, but they need to be the same here) - number of pixels
* in a block. Looking into 'pixman-arm-neon-asm.h' can provide some
* details about this macro. Moreover, if good performance is needed
* the code from this macro needs to be copied into '*_tail_head' macro
* and mixed with the rest of code for optimal instructions scheduling.
* We are actually doing it below.
*
* Now after all the explanations, here is the optimized code.
* Different instruction streams (originating from '*_head', '*_tail'
* and 'cache_preload' macro) use different indentation levels for
* better readability. Actually taking the code from one of these
* indentation levels and ignoring a few VLD/VST instructions would
* result in exactly the code from '*_head', '*_tail' or 'cache_preload'
* macro!
*/
/*
* And now the final part. We are using 'generate_composite_function' macro
* to put all the stuff together. We are specifying the name of the function
* which we want to get, number of bits per pixel for the source, mask and
* destination (0 if unused, like mask in this case). Next come some bit
* flags:
* FLAG_DST_READWRITE - tells that the destination buffer is both read
* and written, for write-only buffer we would use
* FLAG_DST_WRITEONLY flag instead
* FLAG_DEINTERLEAVE_32BPP - tells that we prefer to work with planar data
* and separate color channels for 32bpp format.
* The next things are:
* - the number of pixels processed per iteration (8 in this case, because
* that's the maximum what can fit into four 64-bit NEON registers).
* - prefetch distance, measured in pixel blocks. In this case it is 5 times
* by 8 pixels. That would be 40 pixels, or up to 160 bytes. Optimal
* prefetch distance can be selected by running some benchmarks.
*
* After that we specify some macros, these are 'default_init',
* 'default_cleanup' here which are empty (but it is possible to have custom
* init/cleanup macros to be able to save/restore some extra NEON registers
* like d8-d15 or do anything else) followed by
* 'pixman_composite_over_8888_0565_process_pixblock_head',
* 'pixman_composite_over_8888_0565_process_pixblock_tail' and
* 'pixman_composite_over_8888_0565_process_pixblock_tail_head'
* which we got implemented above.
*
* The last part is the NEON registers allocation scheme.
*/
/******************************************************************************/
/******************************************************************************/
.macro pixman_composite_out_reverse_8888_8888_process_pixblock_head
vmvn.8 d24, d3 /* get inverted alpha */
/* do alpha blending */
vmull.u8 q8, d24, d4
vmull.u8 q9, d24, d5
vmull.u8 q10, d24, d6
vmull.u8 q11, d24, d7
.endm
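/*
 * Note on the tail macro below: the head leaves dst * (255 - src_alpha)
 * as 16-bit products in q8-q11, and each vrshr.u16 #8 / vraddhn.u16 pair
 * computes (t + ((t + 128) >> 8) + 128) >> 8, a rounding approximation
 * of t / 255, narrowing the result back to 8 bits per channel.
 */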
.macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail
vrshr.u16 q14, q8, #8
vrshr.u16 q15, q9, #8
vrshr.u16 q12, q10, #8
vrshr.u16 q13, q11, #8
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
vraddhn.u16 d30, q12, q10
vraddhn.u16 d31, q13, q11
.endm
/******************************************************************************/
.macro pixman_composite_over_8888_8888_process_pixblock_head
pixman_composite_out_reverse_8888_8888_process_pixblock_head
.endm
.macro pixman_composite_over_8888_8888_process_pixblock_tail
pixman_composite_out_reverse_8888_8888_process_pixblock_tail
vqadd.u8 q14, q0, q14
vqadd.u8 q15, q1, q15
.endm
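/*
 * In the tail_head macro below the PF-prefixed lines are the advanced
 * prefetch stream described in the comments above; they are interleaved
 * with the NEON arithmetic so the extra ARM instructions can dual-issue
 * instead of adding cycles (and are assembled only for the prefetch
 * types that use them).
 */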
.macro pixman_composite_over_8888_8888_process_pixblock_tail_head
vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
vrshr.u16 q14, q8, #8
PF add PF_X, PF_X, #8
PF tst PF_CTL, #0xF
vrshr.u16 q15, q9, #8
vrshr.u16 q12, q10, #8
vrshr.u16 q13, q11, #8
PF addne PF_X, PF_X, #8
PF subne PF_CTL, PF_CTL, #1
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
PF cmp PF_X, ORIG_W
vraddhn.u16 d30, q12, q10
vraddhn.u16 d31, q13, q11
vqadd.u8 q14, q0, q14
vqadd.u8 q15, q1, q15
fetch_src_pixblock
PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
vmvn.8 d22, d3
PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
PF subge PF_X, PF_X, ORIG_W
vmull.u8 q8, d22, d4
PF subges PF_CTL, PF_CTL, #0x10
vmull.u8 q9, d22, d5
PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
vmull.u8 q10, d22, d6
PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
vmull.u8 q11, d22, d7
.endm
generate_composite_function \
pixman_composite_over_8888_8888_asm_neon, 32, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_over_8888_8888_process_pixblock_head, \
pixman_composite_over_8888_8888_process_pixblock_tail, \
pixman_composite_over_8888_8888_process_pixblock_tail_head
generate_composite_function_single_scanline \
pixman_composite_scanline_over_asm_neon, 32, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init, \
default_cleanup, \
pixman_composite_over_8888_8888_process_pixblock_head, \
pixman_composite_over_8888_8888_process_pixblock_tail, \
pixman_composite_over_8888_8888_process_pixblock_tail_head
/******************************************************************************/
.macro pixman_composite_over_n_8888_process_pixblock_head
/* deinterleaved source pixels in {d0, d1, d2, d3} */
/* inverted alpha in {d24} */
/* destination pixels in {d4, d5, d6, d7} */
vmull.u8 q8, d24, d4
vmull.u8 q9, d24, d5
vmull.u8 q10, d24, d6
vmull.u8 q11, d24, d7
.endm
.macro pixman_composite_over_n_8888_process_pixblock_tail
vrshr.u16 q14, q8, #8
vrshr.u16 q15, q9, #8
vrshr.u16 q2, q10, #8
vrshr.u16 q3, q11, #8
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
vraddhn.u16 d30, q2, q10
vraddhn.u16 d31, q3, q11
vqadd.u8 q14, q0, q14
vqadd.u8 q15, q1, q15
.endm
.macro pixman_composite_over_n_8888_process_pixblock_tail_head
vrshr.u16 q14, q8, #8
vrshr.u16 q15, q9, #8
vrshr.u16 q2, q10, #8
vrshr.u16 q3, q11, #8
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
vraddhn.u16 d30, q2, q10
vraddhn.u16 d31, q3, q11
vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
vqadd.u8 q14, q0, q14
PF add PF_X, PF_X, #8
PF tst PF_CTL, #0x0F
PF addne PF_X, PF_X, #8
PF subne PF_CTL, PF_CTL, #1
vqadd.u8 q15, q1, q15
PF cmp PF_X, ORIG_W
vmull.u8 q8, d24, d4
PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
vmull.u8 q9, d24, d5
PF subge PF_X, PF_X, ORIG_W
vmull.u8 q10, d24, d6
PF subges PF_CTL, PF_CTL, #0x10
vmull.u8 q11, d24, d7
PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
.endm
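/*
 * The init macro below runs once per call: it loads the solid source
 * color from the stack arguments, splats its b/g/r/a bytes across
 * d0-d3 in the planar layout described earlier, and precomputes the
 * inverted alpha in d24, so the per-block code only has to blend the
 * destination pixels.
 */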
.macro pixman_composite_over_n_8888_init
add DUMMY, sp, #ARGS_STACK_OFFSET
vld1.32 {d3[0]}, [DUMMY]
vdup.8 d0, d3[0]
vdup.8 d1, d3[1]
vdup.8 d2, d3[2]
vdup.8 d3, d3[3]
vmvn.8 d24, d3 /* get inverted alpha */
.endm
generate_composite_function \
pixman_composite_over_n_8888_asm_neon, 0, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_n_8888_init, \
default_cleanup, \
pixman_composite_over_8888_8888_process_pixblock_head, \
pixman_composite_over_8888_8888_process_pixblock_tail, \
pixman_composite_over_n_8888_process_pixblock_tail_head
/******************************************************************************/
.macro pixman_composite_src_n_8888_process_pixblock_head
.endm
.macro pixman_composite_src_n_8888_process_pixblock_tail
.endm
.macro pixman_composite_src_n_8888_process_pixblock_tail_head
vst1.32 {d0, d1, d2, d3}, [DST_W, :128]!
.endm
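/*
 * src_n_8888 just replicates a solid color: the init macro below loads
 * the 32-bit color, duplicates it into both halves of d0 with vsli.u64,
 * then copies it to d1-d3, leaving eight identical pixels that the
 * tail_head macro stores on every iteration.
 */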
.macro pixman_composite_src_n_8888_init
add DUMMY, sp, #ARGS_STACK_OFFSET
vld1.32 {d0[0]}, [DUMMY]
vsli.u64 d0, d0, #32
vorr d1, d0, d0
vorr q1, q0, q0
.endm
.macro pixman_composite_src_n_8888_cleanup
.endm
generate_composite_function \
pixman_composite_src_n_8888_asm_neon, 0, 0, 32, \
FLAG_DST_WRITEONLY, \
8, /* number of pixels, processed in a single block */ \
0, /* prefetch distance */ \
pixman_composite_src_n_8888_init, \
pixman_composite_src_n_8888_cleanup, \
pixman_composite_src_n_8888_process_pixblock_head, \
pixman_composite_src_n_8888_process_pixblock_tail, \
pixman_composite_src_n_8888_process_pixblock_tail_head, \
0, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
.macro pixman_composite_src_8888_8888_process_pixblock_head
.endm
.macro pixman_composite_src_8888_8888_process_pixblock_tail
.endm
.macro pixman_composite_src_8888_8888_process_pixblock_tail_head
vst1.32 {d0, d1, d2, d3}, [DST_W, :128]!
fetch_src_pixblock
cache_preload 8, 8
.endm
generate_composite_function \
pixman_composite_src_8888_8888_asm_neon, 32, 0, 32, \
FLAG_DST_WRITEONLY, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_src_8888_8888_process_pixblock_head, \
pixman_composite_src_8888_8888_process_pixblock_tail, \
pixman_composite_src_8888_8888_process_pixblock_tail_head, \
0, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
|
0BAB1/HOLY_CORE_COURSE
| 1,138
|
1_fpga_edition/fpga/test_programs/test.s
|
# Blink leds
#
# Assembly to blink LEDs once per second in a counting pattern.
# Uses a cache miss to write back.
#
# BRH 11/12
.section .text
.align 2
.global _start
_start:
# Initialization
lui x6, 0x2 # 00002337 Load GPIO base address x6 <= 0x00002000
lui x7, 0x2 # same for x7 <= 0x00002000
ori x7, x7, -1 # FFF3E393 set GPIO address limit x7 <= 0x00002FFF
csrrw x0, 0x7C1, x6 # set base in csr
csrrw x0, 0x7C2, x7 # set limit in csr
addi x18, x0, 0 # 00000913 main counter, set to 0
j loop # 0040006f
loop:
addi x18, x18, 0x1 # 00190913 increment counter
sw x18, 0(x6) # 01232023 write new counter value to GPIO based address
# Delay loop: Wait for 50,000,000 clock cycles = 1 s @ 50 MHz
li x21, 50000000 # 02fafab7 / 080a8a93 Load 50,000,000 into x21
delay_loop:
addi x21, x21, -1 # fffa8a93 Decrement x21
bnez x21, delay_loop # fe0a9ee3 If x21 != 0, continue looping
j loop # fd9ff06f Restart the loop
|
0BAB1/HOLY_CORE_COURSE
| 1,037
|
1_fpga_edition/fpga/test_programs/hello_world/hello.s
|
.section .text
.align 1
.global _start
_start:
# Setup uncached MMIO region from 0x2000 to 0x2FFF
lui x6, 0x2 # x6 = 0x2000
lui x7, 0x2
ori x7, x7, -1 # x7 = 0x2FFF
csrrw x0, 0x7C1, x6 # MMIO base
csrrw x0, 0x7C2, x7 # MMIO limit
# UARTLite base at 0x2800
li x10, 0x2800 # x10 = UART base
la x11, string # x11 = address of string
li x12, 14 # x12 = length of string
loop:
lb x13, 0(x11) # load byte from string
wait:
lw x14, 8(x10) # read UART status (8h)
andi x14, x14, 0x8 # test bit 3 (TX FIFO full)
bnez x14, wait # spin while the TX FIFO is full
sb x13, 4(x10) # write byte to TX register (4h)
addi x11, x11, 0x1 # next char
addi x12, x12, -1 # decrement counter
bnez x12, loop # loop until done
# Done
j .
.section .rodata
.align 1
string:
.asciz "Hello, World\n\r"
|
0BAB1/HOLY_CORE_COURSE
| 7,408
|
1_fpga_edition/fpga/test_programs/I2C_pressure/pressure.s
|
# HOLY CORE PROGRAM
#
# Read the I2C BMP280 sensor and print value to UART as hexadecimal
#
# BRH - 30/05/25
.section .text
.align 1
.global _start
# NOTES :
# 100h => Control
# 104h => Status
# 108h => TX_FIFO
# 10Ch => RX_FIFO
# I²C READ (from BMP280 datasheet)
#
# To be able to read registers, first the register address must be sent in write mode (slave address
# 111011X - 0). Then either a stop or a repeated start condition must be generated. After this the
# slave is addressed in read mode (RW = ‘1’) at address 111011X - 1, after which the slave sends
# out data from auto-incremented register addresses until a NOACKM and stop condition occurs.
# This is depicted in Figure 8, where two bytes are read from register 0xF6 and 0xF7.
#
# Protocol :
#
# 1. we START
# 2. we transmit slave addr 0x77 and ask write mode
# 3. After ACK_S we transmit register to read address
# 4. After ACK_S, we RESTART or STOP + START and initiate a read request on 0x77, ACK_S
# 5. Regs are transmitted 1 by 1 until NO ACK_M + STOP
_start:
# Setup uncached MMIO region from 0x2000 to 0x3800
lui x6, 0x2 # x6 = 0x2000
lui x7, 0x3
ori x7, x7, -1 # x7 = 0x3800
csrrw x0, 0x7C1, x6 # MMIO base
csrrw x0, 0x7C2, x7 # MMIO limit
###########################
# config I2C AXI IP Core
###########################
# Load the AXI_L - I2C IP's base address
lui x10, 0x3 # x10 = 0x3000
# Soft reset AXI- I2C IP core
li x14, 0xA
sw x14, 0x040(x10) # soft reset
# Reset TX_FIFO
# Enable the AXI IIC, remove the TX_FIFO reset, disable the general call
li x14, 0x3 # EN = 1, Reset = 1
sw x14, 0x100(x10)
li x14, 0x1 # EN = 1, Reset = 0
sw x14, 0x100(x10)
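# NOTE (assumption, based on the Xilinx AXI IIC dynamic mode): words
# written to TX_FIFO (108h) carry the data byte in bits 7:0, bit 8 set
# means "issue a START before this byte" and bit 9 set means "issue a
# STOP after it". So 0x1EE = START + slave addr 0x77 in write mode,
# 0x1EF = START + addr 0x77 in read mode, and 0x2xx values end the
# transfer with a STOP.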
###########################
# configure the sensor :
###########################
check_loop_configure_one:
# Check all FIFOs empty and bus not busy
lw x14, 0x104(x10)
andi x14, x14, 0x34 # check flags : RX_FIFO_FULL, TX_FIFO_FULL, BB (Bus Busy)
bnez x14, check_loop_configure_one
# 1st, we configure with 0xF5
# Write to the TX_FIFO to specify the reg we'll read
li x14, 0x1EE # start : specify IIC slave base addr and write
li x15, 0xF5 # specify reg address as data
li x16, 0x200 # data = 00 + stop
sw x14, 0x108(x10)
sw x15, 0x108(x10)
sw x16, 0x108(x10)
check_loop_configure_two:
# Check all FIFOs empty and bus not busy
lw x14, 0x104(x10)
andi x14, x14, 0x34 # check flags : RX_FIFO_FULL, TX_FIFO_FULL, BB (Bus Busy)
bnez x14, check_loop_configure_two
# 2nd, we configure measure with 0xF4
# here we only FORCE 1 measure
# Write to the TX_FIFO to specify the reg we'll read
li x14, 0x1EE # start : specify IIC slave base addr and write
li x15, 0xF4 # specify reg address as data
li x16, 0x209 # data = 09 (os = x2 and force mode) + stop
sw x14, 0x108(x10)
sw x15, 0x108(x10)
sw x16, 0x108(x10)
###########################
# Wait for measurement
###########################
# We then poll 0xF3 bit #3 (0x8 as value) until it's done (0)
wait_for_measurement:
measure_loop_one:
# Check all FIFOs empty and bus not busy
lw x14, 0x104(x10)
andi x14, x14, 0x34 # check flags : RX_FIFO_FULL, TX_FIFO_FULL, BB (Bus Busy)
bnez x14, measure_loop_one
# Write to the TX_FIFO to specify the reg we'll read : (0xF3 = status)
li x14, 0x1EE # start : specify IIC slave base addr and write
li x15, 0x2F3 # specify reg address as data : stop
sw x14, 0x108(x10)
sw x15, 0x108(x10)
# WAIT TEST
li t0, 2500 # each loop = 4 cycles → 2500 × 4 = ~10,000
delay_loop:
addi t0, t0, -1
bnez t0, delay_loop
measure_loop_two:
# Same here
lw x14, 0x104(x10)
andi x14, x14, 0x34 # bit 2 = BB (Bus Busy)
bnez x14, measure_loop_two
# Write to the TX FIFO to request a read and specify we want 1 byte
li x14, 0x1EF # start : request read on IIC slave
li x15, 0x201 # master receiver mode : set stop after 1 byte
sw x14, 0x108(x10)
sw x15, 0x108(x10)
measure_read_loop:
# Wait for RX_FIFO not empty
lw x14, 0x104(x10)
andi x14, x14, 0x40 # check flags : RX_FIFO_EMPTY
bnez x14, measure_read_loop
# Read the RX byte
lb x16, 0x10C(x10)
# Check bit 3, if it's high, then the sensor is still measuring.
andi x16, x16, 0x8
bnez x16, wait_for_measurement
###############################
# read measurement
###############################
# WAIT TEST
li t0, 2500 # each loop = 4 cycles → 2500 × 4 = ~10,000
delay_loop_a:
addi t0, t0, -1
bnez t0, delay_loop_a
check_loop_one:
# Check all FIFOs empty and bus not busy
lw x14, 0x104(x10)
andi x14, x14, 0x34 # check flags : RX_FIFO_FULL, TX_FIFO_FULL, BB (Bus Busy)
bnez x14, check_loop_one
# Write to the TX_FIFO to specify the reg we'll read : (0xF7 = press_msb)
li x14, 0x1EE # start : specify IIC slave base addr and write
li x15, 0x2F8 # specify reg address as data : stop
sw x14, 0x108(x10)
sw x15, 0x108(x10)
# WAIT TEST
li t0, 2500 # each loop = 4 cycles → 2500 × 4 = ~10,000
delay_loop_b:
addi t0, t0, -1
bnez t0, delay_loop_b
check_loop_two:
# Same here
lw x14, 0x104(x10)
andi x14, x14, 0x34 # bit 2 = BB (Bus Busy)
bnez x14, check_loop_two
# Write to the TX FIFO to request a read and specify we want 1 byte
li x14, 0x1EF # start : request read on IIC slave
li x15, 0x201 # master receiver mode : set stop after 1 byte
sw x14, 0x108(x10)
sw x15, 0x108(x10)
# WAIT TEST
li t0, 2500 # each loop = 4 cycles → 2500 × 4 = ~10,000
delay_loop_c:
addi t0, t0, -1
bnez t0, delay_loop_c
read_loop:
# Wait for RX_FIFO not empty
lw x14, 0x104(x10)
andi x14, x14, 0x40 # check flags : RX_FIFO_EMPTY
bnez x14, read_loop
# Read the RX byte
lb x16, 0x10C(x10)
# ==============================
# Write it to UART
# ==============================
li x17, 0x2800 # x17 = UART base
# ---------- High nibble ----------
srli x14, x16, 4 # x14 = high nibble (bits 7:4)
andi x14, x14, 0xF # mask to 4 bits
li x15, '0' # ASCII base
add x14, x14, x15 # x14 = ASCII character
li t1, 58
blt x14, t1, send_hi # 58 = '9' + 1
addi x14, x14, 7 # jump to 'A' for 10–15
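# Worked example: nibble 0xC gives '0' + 12 = 60, which is >= 58 ('9'+1),
# so the +7 lands on 67 = 'C'; nibble 0x5 gives '0' + 5 = 53 = '5' and is
# sent as is.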
send_hi:
wait_hi:
lw t0, 8(x17)
andi t0, t0, 0x8
bnez t0, wait_hi
sb x14, 4(x17)
# ---------- Low nibble ----------
andi x14, x16, 0xF # x14 = low nibble (bits 3:0)
li x15, '0'
add x14, x14, x15
li t1, 58
blt x14, t1, send_lo # 58 = '9' + 1
addi x14, x14, 7
send_lo:
wait_lo:
lw t0, 8(x17)
andi t0, t0, 0x8
bnez t0, wait_lo
sb x14, 4(x17)
# ---------- Newline ----------
li x14, 0x0A
wait_nl:
lw t0, 8(x17)
andi t0, t0, 0x8
bnez t0, wait_nl
sb x14, 4(x17)
# ---------- return ----------
li x14, '\r'
wait_ret:
lw t0, 8(x17)
andi t0, t0, 0x8
bnez t0, wait_ret
sb x14, 4(x17)
j .
|
0BAB1/HOLY_CORE_COURSE
| 8,187
|
2_soc_software_edition/tb/holy_core/test.s
|
# HOLY_CORE BASIC TEST PROGRAM
#
# This program tests basic behavior of the core.
# This test does not ensure compliance but rather
# serves as a quick reference to know if
# the design compiles and if a change
# broke the basic CPU behavior.
#
# BRH 7/25
.section .text
.global _start
_start:
# DATA ADDR STORE
lui x3, 0x1
# LW TEST START
lw x18, 8(x3)
# SW TEST START
sw x18, 12(x3)
# ADD TEST START
lw x19, 16(x3)
add x20, x18, x19
# AND TEST START
and x21, x18, x20
lw x5, 20(x3)
lw x6, 24(x3)
or x7, x5, x6
# BEQ TEST START
beq x6, x7, _start # should not branch
lw x22, 8(x3)
beq x18, x22, beq_lw
nop
nop
beq_to_end:
beq x0, x0, beq_end
beq_lw:
lw x22, 0(x3)
beq x22, x22, beq_to_end
beq_end:
nop
# JAL TEST START
jal x1, jal_lw
nop
nop
nop
nop
jal_lw:
lw x7, 12(x3)
# ADDI TEST START
addi x26, x7, 0x1AB
nop
# AUIPC TEST START
auipc x5, 0x1F1FA
lui x5, 0x2F2FA
# SLTI TEST START
nop
slti x23, x23, 1
# SLTIU TEST START
nop
sltiu x22, x19, 1
# XORI TEST START
nop
xori x19, x18, 0
# ORI TEST START
nop
ori x21, x20, 0
# ANDI TEST START
andi x18, x20, 0x7FF
nop
andi x20, x21, 0
# SLLI TEST START
slli x19, x19, 4
nop
# SRLI TEST START
srli x20, x19, 4
nop
# SRAI TEST START
srai x21, x21, 4
nop
# SUB TEST START
sub x18, x21, x18
# SLL TEST START
addi x7, x0, 8
sll x18, x18, x7
# SLT TEST START
slt x17, x22, x23
# SLTU TEST START
sltu x17, x22, x23
# XOR TEST START
xor x17, x18, x19
# SRL TEST START
srl x8, x19, x7
# SRA TEST START
sra x8, x19, x7
# BLT TEST START
blt x17, x8, blt_addi # not taken
blt x8, x17, bne_test # taken
blt_addi:
addi x8, x0, 12 # never exec !
# BNE TEST START
bne_test:
bne x8, x8, bne_addi # not taken
bne x8, x17, bge_test # taken
bne_addi:
addi x8, x0, 12
# BGE TEST START
bge_test:
bge x8, x17, bge_addi # not taken
bge x8, x8, bltu_test # taken
bge_addi:
addi x8, x0, 12
# BLTU TEST START
bltu_test:
bltu x8, x17, bltu_addi # not taken
bltu x17, x8, bgeu_test # taken
bltu_addi:
addi x8, x0, 12
# BGEU TEST START
bgeu_test:
bgeu x17, x8, bgeu_addi # not taken
bgeu x8, x17, jalr_test # taken
bgeu_addi:
addi x8, x0, 12
# JALR TEST START
jalr_test:
auipc x7, 0
addi x7, x7, 20
jalr x1, -4(x7)
addi x8, x0, 12
# SB TEST START
nop
sb x8, 6(x3)
# SH TEST START
nop
nop
sh x8, 6(x3)
# LB TEST START
addi x7, x3, 0x10
nop
lb x18, -1(x7)
# LBU TEST START
lbu x19, -3(x7)
# LH TEST START
nop
lh x20, -6(x7)
# LHU TEST START
nop
lhu x21, -6(x7)
# CACHE WB TEST
addi x7, x3, 0x200
lw x20, 0(x7)
# CSR FLUSH TEST
addi x20, x0, 1
csrrw x21, 0x7C0, x20
# CSR $ RANGE TEST
addi x20, x0, 0
lui x20, 0x2
addi x21, x20, 0x200
csrrw x0, 0x7C1, x20
csrrw x0, 0x7C2, x21
addi x20, x20, 4
lui x22, 0xABCD1
addi x22, x22, 0x111
sw x22, 0(x20)
lw x22, 4(x20)
lw x22, 0(x20)
################
# SW INTR TEST
################
lui x4, 0x3 # Clint base addr
la x6, trap # Trap handler base addr
csrrw x0, mtvec, x6 # we set mtvec to the trap handler's addr
# we configure CSRs to enable interrupts
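# mie: bit 3 = MSIE (software), bit 7 = MTIE (timer), bit 11 = MEIE (external)
# mstatus: bit 3 = MIE, the global machine-mode interrupt enable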
li t0, (1 << 11) | (1 << 7) | (1 << 3)
csrw mie, t0
li t0, (1 << 3)
csrw mstatus, t0
addi x5, x0, 1
sw x5, 0(x4) # write 1 to clint's msip
# return should happen here
################
# TIMER INTR TEST
################
lui x7, 0x4
add x5, x4, x7 # build Clint's mtimecmp base addr
lui x7, 0xC
add x7, x4, x7 # build Clint's mtime "near" base addr
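# mtime itself sits at CLINT base + 0xBFF8, hence the -8 offset from the
# base + 0xC000 address built above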
sw x0, 4(x5) # set high word of mtimecmp to 0
lw x8, -8(x7) # get the current mtime value
addi x8, x8, 0x10 # add 16 to the timer and store it back to timer cmp
sw x8, 0(x5)
# loop until timer intr happens
wait_for_timer_irq:
j wait_for_timer_irq
# handler returns on this NOP
nop
################
# EXTERNAL INTR TEST
################
# set up plic by enabling intr
li x4, 0x0000F000 # plic base addr
ori x5, x0, 0x1
sw x5, 0(x4) # enable ext intr #1
nop # signal tb we are about to wait for ext intr
wait_for_ext_irq:
j wait_for_ext_irq
# handler returns on this NOP
nop
################
# ECALL EXCEPTION TEST
################
nop
ecall # provoke ecall
################
# DEBUG MODE TEST
################
wait_for_debug_mode:
la t0, debug_rom # load addrs for compiler issues
la t1, debug_exception # load addrs for compiler issues
nop
# tb will send a debug request, effectively jumping
# to "debug ROM", which we can find below
j wait_for_debug_mode
#########################
# Trap handler
#########################
trap:
csrrs x30, 0x342, x0 # store mcause in x30
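# mcause values checked below: 0x80000003 = machine software interrupt,
# 0x80000007 = machine timer interrupt, 0x8000000B = machine external
# interrupt, 0x0000000B = environment call from M-mode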
soft_irq_check: # soft intr handler
li x31, 0x80000003
bne x30, x31, timer_irq_check
# clear the soft interrupt
sw x0, 0(x4)
# mepc += 4
# to skip intr write on return
csrrs x31, 0x341, x0
addi x31, x31, 0x4
csrrw x0, 0x341, x31
j m_ret
timer_irq_check: # timer irq handler
li x31, 0x80000007
bne x30, x31, ext_irq_check
# clear the timer intr by pumping the mtimecmp to full F
li x9, 0xFFFFFFFF
sw x9, 0(x5) # x5 should already contain mtimecmp base addr
# mepc += 4
# to skip intr write on return
csrrs x31, 0x341, x0
addi x31, x31, 0x4
csrrw x0, 0x341, x31
j m_ret
ext_irq_check:
li x31, 0x8000000B
bne x30, x31, ecall_check
# claim the interrupt
lw x8, 4(x4)
# Do a loop as placeholder
li t0, 32
loop:
addi t0, t0, -1
bnez t0, loop
# clear the intr using NOP which
# the tb will use as a placeholder
# to clear the ext intr
nop
# signal completion to the plic
sw x8, 4(x4)
# mepc += 4
# to skip intr write on return
csrrs x31, 0x341, x0
addi x31, x31, 0x4
csrrw x0, 0x341, x31
j m_ret
ecall_check:
li x31, 0x0000000B
bne x30, x31, m_ret
# Do a loop as placeholder
li t0, 32
loop2:
addi t0, t0, -1
bnez t0, loop2
# mepc += 4
# to skip intr write on return
csrrs x31, 0x341, x0
addi x31, x31, 0x4
csrrw x0, 0x341, x31
j m_ret
m_ret: # return from trap routine
mret # return to where we left the program
#########################
# Debug ROM
#########################
debug_rom:
# some nops and a dret
nop
# if dscratch0 == 1, we want to do the single
# step debug test
csrr t2, dscratch0
li t3, 0x1
beq t2, t3, single_step_test
nop
nop
# we first make an ebreak test
# which should return to the "normal" park loop
# and then we'll branch to test dret behavior
beq x0, x5, d_ret
addi x5, x0, 0x0
ebreak
d_ret:
nop
nop
nop
# right before dret, we set dscratch0 to 1
# to signal the first debug pass was done
csrwi dscratch0, 0x1
dret
debug_exception:
nop
nop
nop
nop
nop
j debug_rom
single_step_test:
nop
nop
# we set dcsr's step flag to 1
# and dret. The core should come back
# right after, except if single step was already 1,
# in which case we clear it, write 2 to scratch for
# the testbench to check, and leave
csrr t0, 0x7b0 # dcsr
andi t1, t0, (1 << 2) # t1 = dcsr.step ? 4 : 0
beqz t1, set_step # if step == 0, go set it
clear_step:
csrci 0x7b0, 4
li t2, 2
csrw 0x7b2, t2
dret
set_step:
csrsi 0x7b0, 4
dret
|
0intro/9hist
| 8,780
|
alphapc/l.s
|
#include "mem.h"
#define SP R30
#define HI_IPL 6 /* use 7 to disable mchecks */
TEXT _main(SB), $-8
MOVQ $setSB(SB), R29
MOVQ R29, R16
CALL_PAL $PALwrkgp
MOVQ $mach0(SB), R(MACH)
MOVQ $(BY2PG-8)(R(MACH)), R30
MOVQ R31, R(USER)
MOVQ R31, 0(R(MACH))
MOVQ $edata(SB), R1
MOVQ $end(SB), R2
clrbss:
MOVQ R31, (R1)
ADDQ $8, R1
CMPUGT R1, R2, R3
BEQ R3, clrbss
MOVL R0, bootconf(SB) /* passed in from boot loader */
_fpinit:
MOVQ $1, R16
CALL_PAL $PALwrfen
MOVQ initfpcr(SB), R1 /* MOVQ $0x2800800000000000, R1 */
MOVQ R1, (R30)
MOVT (R30), F1
MOVT F1, FPCR
MOVT $0.5, F28
ADDT F28, F28, F29
ADDT F29, F29, F30
MOVT F31, F1
MOVT F31, F2
MOVT F31, F3
MOVT F31, F4
MOVT F31, F5
MOVT F31, F6
MOVT F31, F7
MOVT F31, F8
MOVT F31, F9
MOVT F31, F10
MOVT F31, F11
MOVT F31, F12
MOVT F31, F13
MOVT F31, F14
MOVT F31, F15
MOVT F31, F16
MOVT F31, F17
MOVT F31, F18
MOVT F31, F19
MOVT F31, F20
MOVT F31, F21
MOVT F31, F22
MOVT F31, F23
MOVT F31, F24
MOVT F31, F25
MOVT F31, F26
MOVT F31, F27
JSR main(SB)
MOVQ $_divq(SB), R31 /* touch _divq etc.; doesn't need to execute */
MOVQ $_divl(SB), R31 /* touch _divl etc.; doesn't need to execute */
RET
TEXT setpcb(SB), $-8
MOVQ R30, (R0)
AND $0x7FFFFFFF, R0, R16 /* make address physical */
CALL_PAL $PALswpctx
RET
GLOBL mach0(SB), $(MAXMACH*BY2PG)
GLOBL init_ptbr(SB), $8
TEXT firmware(SB), $-8
CALL_PAL $PALhalt
TEXT xxfirmware(SB), $-8
CALL_PAL $PALhalt
TEXT splhi(SB), $0
MOVL R26, 4(R(MACH)) /* save PC in m->splpc */
MOVQ $HI_IPL, R16
CALL_PAL $PALswpipl
RET
TEXT spllo(SB), $0
MOVQ R31, R16
CALL_PAL $PALswpipl
RET
TEXT splx(SB), $0
MOVL R26, 4(R(MACH)) /* save PC in m->splpc */
TEXT splxpc(SB), $0 /* for iunlock */
MOVQ R0, R16
CALL_PAL $PALswpipl
RET
TEXT spldone(SB), $0
RET
TEXT islo(SB), $0
CALL_PAL $PALrdps
AND $IPL, R0
XOR $HI_IPL, R0
RET
TEXT mb(SB), $-8
MB
RET
TEXT icflush(SB), $-8
CALL_PAL $PALimb
RET
TEXT tlbflush(SB), $-8
MOVQ R0, R16
MOVL 4(FP), R17
CALL_PAL $PALtbi
RET
TEXT swpctx(SB), $-8
MOVQ R0, R16
AND $0x7FFFFFFF, R16 /* make address physical */
CALL_PAL $PALswpctx
RET
TEXT wrent(SB), $-8
MOVQ R0, R17
MOVL 4(FP), R16
CALL_PAL $PALwrent
RET
TEXT wrvptptr(SB), $-8
MOVQ R0, R16
CALL_PAL $PALwrvptptr
RET
TEXT cserve(SB), $-8
MOVQ R0, R16
MOVL 4(FP), R17
CALL_PAL $PALcserve
RET
TEXT setlabel(SB), $-8
MOVL R30, 0(R0)
MOVL R26, 4(R0)
MOVQ $0, R0
RET
TEXT gotolabel(SB), $-8
MOVL 0(R0), R30
MOVL 4(R0), R26
MOVQ $1, R0
RET
TEXT tas(SB), $-8
MOVQ R0, R1 /* l */
tas1:
MOVLL (R1), R0 /* l->key */
BNE R0, tas2
MOVQ $1, R2
MOVLC R2, (R1) /* l->key = 1 */
BEQ R2, tas1 /* write failed, try again? */
tas2:
RET
TEXT fpenab(SB), $-8
MOVQ R0, R16
CALL_PAL $PALwrfen
RET
TEXT rpcc(SB), $0
MOVL R0, R1
MOVL $0, R0
WORD $0x6000C000 /* RPCC R0 */
BEQ R1, _ret
MOVQ R0, (R1)
_ret:
RET
/*
* Exception handlers. The stack frame looks like this:
*
* R30+0: (unused) link reg storage (R26) (32 bits)
* R30+4: padding for alignment (32 bits)
* R30+8: trap()'s first arg storage (R0) (32 bits -- type Ureg*)
* R30+12: padding for alignment (32 bits)
* R30+16: first 31 fields of Ureg, saved here (31*64 bits)
* R30+264: other 6 fields of Ureg, saved by PALcode (6*64 bits)
* R30+312: previous value of KSP before trap
*/
TEXT arith(SB), $-8
SUBQ $(4*BY2WD+31*BY2V), R30
MOVQ R0, (4*BY2WD+4*BY2V)(R30)
MOVQ $1, R0
JMP trapcommon
TEXT illegal0(SB), $-8
SUBQ $(4*BY2WD+31*BY2V), R30
MOVQ R0, (4*BY2WD+4*BY2V)(R30)
MOVQ $2, R0
JMP trapcommon
TEXT fault0(SB), $-8
SUBQ $(4*BY2WD+31*BY2V), R30
MOVQ R0, (4*BY2WD+4*BY2V)(R30)
MOVQ $4, R0
JMP trapcommon
TEXT unaligned(SB), $-8
SUBQ $(4*BY2WD+31*BY2V), R30
MOVQ R0, (4*BY2WD+4*BY2V)(R30)
MOVQ $6, R0
JMP trapcommon
TEXT intr0(SB), $-8
SUBQ $(4*BY2WD+31*BY2V), R30
MOVQ R0, (4*BY2WD+4*BY2V)(R30)
MOVQ $3, R0
trapcommon:
MOVQ R0, (4*BY2WD+0*BY2V)(R30)
MOVQ R16, (4*BY2WD+1*BY2V)(R30)
MOVQ R17, (4*BY2WD+2*BY2V)(R30)
MOVQ R18, (4*BY2WD+3*BY2V)(R30)
/* R0 already saved, (4*BY2WD+4*BY2V)(R30) */
MOVQ R1, (4*BY2WD+5*BY2V)(R30)
MOVQ R2, (4*BY2WD+6*BY2V)(R30)
MOVQ R3, (4*BY2WD+7*BY2V)(R30)
MOVQ R4, (4*BY2WD+8*BY2V)(R30)
MOVQ R5, (4*BY2WD+9*BY2V)(R30)
MOVQ R6, (4*BY2WD+10*BY2V)(R30)
MOVQ R7, (4*BY2WD+11*BY2V)(R30)
MOVQ R8, (4*BY2WD+12*BY2V)(R30)
MOVQ R9, (4*BY2WD+13*BY2V)(R30)
MOVQ R10, (4*BY2WD+14*BY2V)(R30)
MOVQ R11, (4*BY2WD+15*BY2V)(R30)
MOVQ R12, (4*BY2WD+16*BY2V)(R30)
MOVQ R13, (4*BY2WD+17*BY2V)(R30)
MOVQ R14, (4*BY2WD+18*BY2V)(R30)
MOVQ R15, (4*BY2WD+19*BY2V)(R30)
MOVQ R19, (4*BY2WD+20*BY2V)(R30)
MOVQ R20, (4*BY2WD+21*BY2V)(R30)
MOVQ R21, (4*BY2WD+22*BY2V)(R30)
MOVQ R22, (4*BY2WD+23*BY2V)(R30)
MOVQ R23, (4*BY2WD+24*BY2V)(R30)
MOVQ R24, (4*BY2WD+25*BY2V)(R30)
MOVQ R25, (4*BY2WD+26*BY2V)(R30)
MOVQ R26, (4*BY2WD+27*BY2V)(R30)
MOVQ R27, (4*BY2WD+28*BY2V)(R30)
MOVQ R28, (4*BY2WD+29*BY2V)(R30)
MOVQ $HI_IPL, R16
CALL_PAL $PALswpipl
CALL_PAL $PALrdusp
MOVQ R0, (4*BY2WD+30*BY2V)(R30) /* save USP */
MOVQ $mach0(SB), R(MACH)
MOVQ $(4*BY2WD)(R30), R0
JSR trap(SB)
trapret:
MOVQ (4*BY2WD+30*BY2V)(R30), R16 /* USP */
CALL_PAL $PALwrusp /* ... */
MOVQ (4*BY2WD+4*BY2V)(R30), R0
MOVQ (4*BY2WD+5*BY2V)(R30), R1
MOVQ (4*BY2WD+6*BY2V)(R30), R2
MOVQ (4*BY2WD+7*BY2V)(R30), R3
MOVQ (4*BY2WD+8*BY2V)(R30), R4
MOVQ (4*BY2WD+9*BY2V)(R30), R5
MOVQ (4*BY2WD+10*BY2V)(R30), R6
MOVQ (4*BY2WD+11*BY2V)(R30), R7
MOVQ (4*BY2WD+12*BY2V)(R30), R8
MOVQ (4*BY2WD+13*BY2V)(R30), R9
MOVQ (4*BY2WD+14*BY2V)(R30), R10
MOVQ (4*BY2WD+15*BY2V)(R30), R11
MOVQ (4*BY2WD+16*BY2V)(R30), R12
MOVQ (4*BY2WD+17*BY2V)(R30), R13
MOVQ (4*BY2WD+18*BY2V)(R30), R14
MOVQ (4*BY2WD+19*BY2V)(R30), R15
MOVQ (4*BY2WD+20*BY2V)(R30), R19
MOVQ (4*BY2WD+21*BY2V)(R30), R20
MOVQ (4*BY2WD+22*BY2V)(R30), R21
MOVQ (4*BY2WD+23*BY2V)(R30), R22
MOVQ (4*BY2WD+24*BY2V)(R30), R23
MOVQ (4*BY2WD+25*BY2V)(R30), R24
MOVQ (4*BY2WD+26*BY2V)(R30), R25
MOVQ (4*BY2WD+27*BY2V)(R30), R26
MOVQ (4*BY2WD+28*BY2V)(R30), R27
MOVQ (4*BY2WD+29*BY2V)(R30), R28
/* USP already restored from (4*BY2WD+30*BY2V)(R30) */
ADDQ $(4*BY2WD+31*BY2V), R30
CALL_PAL $PALrti
TEXT forkret(SB), $0
MOVQ R31, R0 /* Fake out system call return */
JMP systrapret
TEXT syscall0(SB), $-8
SUBQ $(4*BY2WD+31*BY2V), R30
MOVQ R0, (4*BY2WD+4*BY2V)(R30) /* save scallnr in R0 */
MOVQ $HI_IPL, R16
CALL_PAL $PALswpipl
MOVQ $mach0(SB), R(MACH)
CALL_PAL $PALrdusp
MOVQ R0, (4*BY2WD+30*BY2V)(R30) /* save USP */
MOVQ R26, (4*BY2WD+27*BY2V)(R30) /* save last return address */
MOVQ $(4*BY2WD)(R30), R0 /* pass address of Ureg */
JSR syscall(SB)
systrapret:
MOVQ (4*BY2WD+30*BY2V)(R30), R16 /* USP */
CALL_PAL $PALwrusp /* consider doing this in execregs... */
MOVQ (4*BY2WD+27*BY2V)(R30), R26 /* restore last return address */
ADDQ $(4*BY2WD+31*BY2V), R30
CALL_PAL $PALretsys
/*
* Take first processor into user mode
* - argument is stack pointer to user
*/
TEXT touser(SB), $-8
MOVQ R0, R16
CALL_PAL $PALwrusp /* set USP to value passed */
SUBQ $(6*BY2V), R30 /* create frame for retsys */
MOVQ $(UTZERO+32), R26 /* header appears in text */
MOVQ R26, (1*BY2V)(R30) /* PC -- only reg that matters */
CALL_PAL $PALretsys
TEXT rfnote(SB), $0
SUBL $(2*BY2WD), R0, SP
JMP trapret
TEXT savefpregs(SB), $-8
MOVT F0, 0x00(R0)
MOVT F1, 0x08(R0)
MOVT F2, 0x10(R0)
MOVT F3, 0x18(R0)
MOVT F4, 0x20(R0)
MOVT F5, 0x28(R0)
MOVT F6, 0x30(R0)
MOVT F7, 0x38(R0)
MOVT F8, 0x40(R0)
MOVT F9, 0x48(R0)
MOVT F10, 0x50(R0)
MOVT F11, 0x58(R0)
MOVT F12, 0x60(R0)
MOVT F13, 0x68(R0)
MOVT F14, 0x70(R0)
MOVT F15, 0x78(R0)
MOVT F16, 0x80(R0)
MOVT F17, 0x88(R0)
MOVT F18, 0x90(R0)
MOVT F19, 0x98(R0)
MOVT F20, 0xA0(R0)
MOVT F21, 0xA8(R0)
MOVT F22, 0xB0(R0)
MOVT F23, 0xB8(R0)
MOVT F24, 0xC0(R0)
MOVT F25, 0xC8(R0)
MOVT F26, 0xD0(R0)
MOVT F27, 0xD8(R0)
MOVT F28, 0xE0(R0)
MOVT F29, 0xE8(R0)
MOVT F30, 0xF0(R0)
MOVT F31, 0xF8(R0)
MOVT FPCR, F0
MOVT F0, 0x100(R0)
MOVQ $0, R16
CALL_PAL $PALwrfen /* disable */
RET
TEXT restfpregs(SB), $-8
MOVQ $1, R16
CALL_PAL $PALwrfen /* enable */
MOVT 0x100(R0), F0
MOVT F0, FPCR
MOVT 0x00(R0), F0
MOVT 0x08(R0), F1
MOVT 0x10(R0), F2
MOVT 0x18(R0), F3
MOVT 0x20(R0), F4
MOVT 0x28(R0), F5
MOVT 0x30(R0), F6
MOVT 0x38(R0), F7
MOVT 0x40(R0), F8
MOVT 0x48(R0), F9
MOVT 0x50(R0), F10
MOVT 0x58(R0), F11
MOVT 0x60(R0), F12
MOVT 0x68(R0), F13
MOVT 0x70(R0), F14
MOVT 0x78(R0), F15
MOVT 0x80(R0), F16
MOVT 0x88(R0), F17
MOVT 0x90(R0), F18
MOVT 0x98(R0), F19
MOVT 0xA0(R0), F20
MOVT 0xA8(R0), F21
MOVT 0xB0(R0), F22
MOVT 0xB8(R0), F23
MOVT 0xC0(R0), F24
MOVT 0xC8(R0), F25
MOVT 0xD0(R0), F26
MOVT 0xD8(R0), F27
MOVT 0xE0(R0), F28
MOVT 0xE8(R0), F29
MOVT 0xF0(R0), F30
MOVT 0xF8(R0), F31
RET
|
0intro/9hist
| 2,936
|
alphapc/memmove.s
|
#define QUAD 8
#define ALIGN 64
#define BLOCK 64
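/*
 * memmove(to, from, n): overlap-safe copy. When from < to the copy runs
 * backwards from the end of the buffers (_balign onwards), otherwise
 * forwards (_forward). Both paths align the destination, move 64-byte
 * blocks, then 8-byte quads, then single bytes, using EXTQH/EXTQL to
 * merge quads when the source is not quad-aligned.
 */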
TEXT memmove(SB), $0
MOVL from+4(FP), R7
MOVL n+8(FP), R10
MOVQ R0, R6
CMPUGE R7, R0, R5
BNE R5, _forward
MOVQ R6, R8 /* end to address */
ADDL R10, R6, R6 /* to+n */
ADDL R10, R7, R7 /* from+n */
CMPUGE $ALIGN, R10, R1 /* need at least ALIGN bytes */
BNE R1, _b1tail
_balign:
AND $(ALIGN-1), R6, R1
BEQ R1, _baligned
MOVBU -1(R7), R2
ADDL $-1, R6, R6
MOVB R2, (R6)
ADDL $-1, R7, R7
JMP _balign
_baligned:
AND $(QUAD-1), R7, R1 /* is the source quad-aligned */
BNE R1, _bunaligned
ADDL $(BLOCK-1), R8, R9
_bblock:
CMPUGE R9, R6, R1
BNE R1, _b8tail
MOVQ -64(R7), R22
MOVQ -56(R7), R23
MOVQ -48(R7), R24
MOVQ -40(R7), R25
MOVQ -32(R7), R2
MOVQ -24(R7), R3
MOVQ -16(R7), R4
MOVQ -8(R7), R5
SUBL $64, R6, R6
SUBL $64, R7, R7
MOVQ R22, (R6)
MOVQ R23, 8(R6)
MOVQ R24, 16(R6)
MOVQ R25, 24(R6)
MOVQ R2, 32(R6)
MOVQ R3, 40(R6)
MOVQ R4, 48(R6)
MOVQ R5, 56(R6)
JMP _bblock
_b8tail:
ADDL $(QUAD-1), R8, R9
_b8block:
CMPUGE R9, R6, R1
BNE R1, _b1tail
MOVQ -8(R7), R2
SUBL $8, R6
MOVQ R2, (R6)
SUBL $8, R7
JMP _b8block
_b1tail:
CMPUGE R8, R6, R1
BNE R1, _ret
MOVBU -1(R7), R2
SUBL $1, R6, R6
MOVB R2, (R6)
SUBL $1, R7, R7
JMP _b1tail
_ret:
RET
_bunaligned:
ADDL $(16-1), R8, R9
_bu8block:
CMPUGE R9, R6, R1
BNE R1, _b1tail
MOVQU -16(R7), R4
MOVQU -8(R7), R3
MOVQU (R7), R2
SUBL $16, R6
EXTQH R7, R2, R2
EXTQL R7, R3, R5
OR R5, R2, R11
EXTQH R7, R3, R3
EXTQL R7, R4, R4
OR R3, R4, R13
MOVQ R11, 8(R6)
MOVQ R13, (R6)
SUBL $16, R7
JMP _bu8block
_forward:
ADDL R10, R6, R8 /* end to address */
CMPUGE $ALIGN, R10, R1 /* need at least ALIGN bytes */
BNE R1, _f1tail
_falign:
AND $(ALIGN-1), R6, R1
BEQ R1, _faligned
MOVBU (R7), R2
ADDL $1, R6, R6
ADDL $1, R7, R7
MOVB R2, -1(R6)
JMP _falign
_faligned:
AND $(QUAD-1), R7, R1 /* is the source quad-aligned */
BNE R1, _funaligned
SUBL $(BLOCK-1), R8, R9
_fblock:
CMPUGT R9, R6, R1
BEQ R1, _f8tail
MOVQ (R7), R2
MOVQ 8(R7), R3
MOVQ 16(R7), R4
MOVQ 24(R7), R5
MOVQ 32(R7), R22
MOVQ 40(R7), R23
MOVQ 48(R7), R24
MOVQ 56(R7), R25
ADDL $64, R6, R6
ADDL $64, R7, R7
MOVQ R2, -64(R6)
MOVQ R3, -56(R6)
MOVQ R4, -48(R6)
MOVQ R5, -40(R6)
MOVQ R22, -32(R6)
MOVQ R23, -24(R6)
MOVQ R24, -16(R6)
MOVQ R25, -8(R6)
JMP _fblock
_f8tail:
SUBL $(QUAD-1), R8, R9
_f8block:
CMPUGT R9, R6, R1
BEQ R1, _f1tail
MOVQ (R7), R2
ADDL $8, R6
ADDL $8, R7
MOVQ R2, -8(R6)
JMP _f8block
_f1tail:
CMPUGT R8, R6, R1
BEQ R1, _fret
MOVBU (R7), R2
ADDL $1, R6, R6
ADDL $1, R7, R7
MOVB R2, -1(R6)
JMP _f1tail
_fret:
RET
_funaligned:
SUBL $(16-1), R8, R9
_fu8block:
CMPUGT R9, R6, R1
BEQ R1, _f1tail
MOVQU (R7), R2
MOVQU 8(R7), R3
MOVQU 16(R7), R4
EXTQL R7, R2, R2
EXTQH R7, R3, R5
OR R5, R2, R11
EXTQL R7, R3, R3
MOVQ R11, (R6)
EXTQH R7, R4, R4
OR R3, R4, R11
MOVQ R11, 8(R6)
ADDL $16, R6
ADDL $16, R7
JMP _fu8block
|
0intro/9hist
| 10,389
|
mtx/l.s
|
#include "mem.h"
/* use of SPRG registers in save/restore */
#define SAVER0 SPRG0
#define SAVER1 SPRG1
#define SAVELR SPRG2
#define SAVEXX SPRG3
/* special instruction definitions */
#define BDNZ BC 16,0,
#define BDNE BC 0,2,
#define TLBIA WORD $((31<<26)|(307<<1))
#define TLBSYNC WORD $((31<<26)|(566<<1))
/* on some models mtmsr doesn't synchronise enough (eg, 603e) */
#define MSRSYNC SYNC; ISYNC
#define UREGSPACE (UREGSIZE+8)
TEXT start(SB), $-4
/*
* setup MSR
* turn off interrupts
* use 0x000 as exception prefix
* enable machine check
*/
MOVW MSR, R3
MOVW $(MSR_EE|MSR_IP), R4
ANDN R4, R3
OR $(MSR_ME), R3
ISYNC
MOVW R3, MSR
MSRSYNC
/* except during trap handling, R0 is zero from now on */
MOVW $0, R0
/* setup SB for pre mmu */
MOVW $setSB(SB), R2
MOVW $KZERO, R3
ANDN R3, R2
BL mmuinit0(SB)
/* running with MMU on!! */
/* set R2 to correct value */
MOVW $setSB(SB), R2
/* debugger sets R1 to top of usable memory +1 */
MOVW R1, memsize(SB)
BL kfpinit(SB)
/* set up Mach */
MOVW $mach0(SB), R(MACH)
ADD $(MACHSIZE-8), R(MACH), R1 /* set stack */
MOVW R0, R(USER)
MOVW R0, 0(R(MACH))
BL main(SB)
RETURN /* not reached */
GLOBL mach0(SB), $(MAXMACH*BY2PG)
GLOBL memsize(SB), $4
/*
* on return from this function we will be running in virtual mode.
* We set up the Block Address Translation (BAT) registers thus:
* 1) first 3 BATs are 256M blocks, starting from KZERO->0
* 2) remaining BAT maps last 256M directly
*/
TEXT mmuinit0(SB), $0
/* reset all the tlbs */
MOVW $64, R3
MOVW R3, CTR
MOVW $0, R4
tlbloop:
TLBIE R4
ADD $BIT(19), R4
BDNZ tlbloop
TLBSYNC
/* KZERO -> 0 */
MOVW $(KZERO|(0x7ff<<2)|2), R3
MOVW $(PTEVALID|PTEWRITE), R4
MOVW R3, SPR(IBATU(0))
MOVW R4, SPR(IBATL(0))
MOVW R3, SPR(DBATU(0))
MOVW R4, SPR(DBATL(0))
/* KZERO+256M -> 256M */
ADD $(1<<28), R3
ADD $(1<<28), R4
MOVW R3, SPR(IBATU(1))
MOVW R4, SPR(IBATL(1))
MOVW R3, SPR(DBATU(1))
MOVW R4, SPR(DBATL(1))
/* KZERO+512M -> 512M */
ADD $(1<<28), R3
ADD $(1<<28), R4
MOVW R3, SPR(IBATU(2))
MOVW R4, SPR(IBATL(2))
MOVW R3, SPR(DBATU(2))
MOVW R4, SPR(DBATL(2))
/* direct map last block, uncached, (?guarded) */
MOVW $((0xf<<28)|(0x7ff<<2)|2), R3
MOVW $((0xf<<28)|PTE1_I|PTE1_G|PTE1_RW), R4
MOVW R3, SPR(DBATU(3))
MOVW R4, SPR(DBATL(3))
/* IBAT 3 unused */
MOVW R0, SPR(IBATU(3))
MOVW R0, SPR(IBATL(3))
/* enable MMU */
MOVW LR, R3
OR $KZERO, R3
MOVW R3, SPR(SRR0)
MOVW MSR, R4
OR $(MSR_IR|MSR_DR), R4
MOVW R4, SPR(SRR1)
RFI /* resume in kernel mode in caller */
RETURN
TEXT kfpinit(SB), $0
MOVFL $0,FPSCR(7)
MOVFL $0xD,FPSCR(6) /* VE, OE, ZE */
MOVFL $0, FPSCR(5)
MOVFL $0, FPSCR(3)
MOVFL $0, FPSCR(2)
MOVFL $0, FPSCR(1)
MOVFL $0, FPSCR(0)
FMOVD $4503601774854144.0, F27
FMOVD $0.5, F29
FSUB F29, F29, F28
FADD F29, F29, F30
FADD F30, F30, F31
FMOVD F28, F0
FMOVD F28, F1
FMOVD F28, F2
FMOVD F28, F3
FMOVD F28, F4
FMOVD F28, F5
FMOVD F28, F6
FMOVD F28, F7
FMOVD F28, F8
FMOVD F28, F9
FMOVD F28, F10
FMOVD F28, F11
FMOVD F28, F12
FMOVD F28, F13
FMOVD F28, F14
FMOVD F28, F15
FMOVD F28, F16
FMOVD F28, F17
FMOVD F28, F18
FMOVD F28, F19
FMOVD F28, F20
FMOVD F28, F21
FMOVD F28, F22
FMOVD F28, F23
FMOVD F28, F24
FMOVD F28, F25
FMOVD F28, F26
RETURN
TEXT splhi(SB), $0
MOVW LR, R31
MOVW R31, 4(R(MACH)) /* save PC in m->splpc */
MOVW MSR, R3
RLWNM $0, R3, $~MSR_EE, R4
SYNC
MOVW R4, MSR
MSRSYNC
RETURN
TEXT splx(SB), $0
/* fall through */
TEXT splxpc(SB), $0
MOVW LR, R31
MOVW R31, 4(R(MACH)) /* save PC in m->splpc */
MOVW MSR, R4
RLWMI $0, R3, $MSR_EE, R4
SYNC
MOVW R4, MSR
MSRSYNC
RETURN
TEXT spllo(SB), $0
MOVW MSR, R3
OR $MSR_EE, R3, R4
SYNC
MOVW R4, MSR
MSRSYNC
RETURN
TEXT spldone(SB), $0
RETURN
TEXT islo(SB), $0
MOVW MSR, R3
RLWNM $0, R3, $MSR_EE, R3
RETURN
TEXT setlabel(SB), $-4
MOVW LR, R31
MOVW R1, 0(R3)
MOVW R31, 4(R3)
MOVW $0, R3
RETURN
TEXT gotolabel(SB), $-4
MOVW 4(R3), R31
MOVW R31, LR
MOVW 0(R3), R1
MOVW $1, R3
RETURN
TEXT touser(SB), $-4
MOVW $(UTZERO+32), R5 /* header appears in text */
MOVW $(MSR_EE|MSR_PR|MSR_ME|MSR_IR|MSR_DR|MSR_RI), R4
MOVW R4, SPR(SRR1)
MOVW R3, R1
MOVW R5, SPR(SRR0)
RFI
TEXT icflush(SB), $-4 /* icflush(virtaddr, count) */
MOVW n+4(FP), R4
RLWNM $0, R3, $~(CACHELINESZ-1), R5
SUB R5, R3
ADD R3, R4
ADD $(CACHELINESZ-1), R4
SRAW $CACHELINELOG, R4
MOVW R4, CTR
icf0: ICBI (R5)
ADD $CACHELINESZ, R5
BDNZ icf0
ISYNC
RETURN
TEXT dcflush(SB), $-4 /* dcflush(virtaddr, count) */
MOVW n+4(FP), R4
RLWNM $0, R3, $~(CACHELINESZ-1), R5
CMP R4, $0
BLE dcf1
SUB R5, R3
ADD R3, R4
ADD $(CACHELINESZ-1), R4
SRAW $CACHELINELOG, R4
MOVW R4, CTR
dcf0: DCBF (R5)
ADD $CACHELINESZ, R5
BDNZ dcf0
dcf1:
SYNC
RETURN
TEXT tas(SB), $0
SYNC
MOVW R3, R4
MOVW $0xdead,R5
tas1:
DCBF (R4) /* fix for 603x bug */
LWAR (R4), R3
CMP R3, $0
BNE tas0
STWCCC R5, (R4)
BNE tas1
tas0:
SYNC
ISYNC
RETURN
TEXT getpvr(SB), $0
MOVW SPR(PVR), R3
RETURN
TEXT getdec(SB), $0
MOVW SPR(DEC), R3
RETURN
TEXT putdec(SB), $0
MOVW R3, SPR(DEC)
RETURN
TEXT getdar(SB), $0
MOVW SPR(DAR), R3
RETURN
TEXT getdsisr(SB), $0
MOVW SPR(DSISR), R3
RETURN
TEXT getmsr(SB), $0
MOVW MSR, R3
RETURN
TEXT putmsr(SB), $0
SYNC
MOVW R3, MSR
MSRSYNC
RETURN
TEXT putsdr1(SB), $0
MOVW R3, SPR(SDR1)
RETURN
TEXT putsr(SB), $0
MOVW 4(FP), R4
MOVW R4, SEG(R3)
RETURN
TEXT gethid0(SB), $0
MOVW SPR(HID0), R3
RETURN
TEXT gethid1(SB), $0
MOVW SPR(HID1), R3
RETURN
TEXT puthid0(SB), $0
MOVW R3, SPR(HID0)
RETURN
TEXT puthid1(SB), $0
MOVW R3, SPR(HID1)
RETURN
TEXT eieio(SB), $0
EIEIO
RETURN
TEXT sync(SB), $0
SYNC
RETURN
TEXT tlbflushall(SB), $0
MOVW $64, R3
MOVW R3, CTR
MOVW $0, R4
tlbflushall0:
TLBIE R4
ADD $BIT(19), R4
BDNZ tlbflushall0
EIEIO
TLBSYNC
SYNC
RETURN
TEXT tlbflush(SB), $0
TLBIE R3
RETURN
TEXT gotopc(SB), $0
MOVW R3, CTR
MOVW LR, R31 /* for trace back */
BR (CTR)
/*
* traps force memory mapping off.
* the following code has been executed at the exception
* vector location
* MOVW R0, SPR(SAVER0)
* MOVW LR, R0
* MOVW R0, SPR(SAVELR)
* bl trapvec(SB)
*/
TEXT trapvec(SB), $-4
MOVW LR, R0
MOVW R1, SPR(SAVER1)
MOVW R0, SPR(SAVEXX) /* vector */
/* did we come from user space */
MOVW SPR(SRR1), R0
MOVW CR, R1
MOVW R0, CR
BC 4,17,ktrap
/* switch to kernel stack */
MOVW R1, CR
MOVW R2, R0
MOVW $setSB(SB), R2
RLWNM $0, R2, $~KZERO, R2 /* PADDR(setSB) */
MOVW $mach0(SB), R1 /* m-> */
RLWNM $0, R1, $~KZERO, R1 /* PADDR(m->) */
MOVW 8(R1), R1 /* m->proc */
RLWNM $0, R1, $~KZERO, R1 /* PADDR(m->proc) */
MOVW 8(R1), R1 /* m->proc->kstack */
RLWNM $0, R1, $~KZERO, R1 /* PADDR(m->proc->kstack) */
ADD $(KSTACK-UREGSIZE), R1
MOVW R0, R2
BL saveureg(SB)
BL trap(SB)
BR restoreureg
ktrap:
MOVW R1, CR
MOVW SPR(SAVER1), R1
RLWNM $0, R1, $~KZERO, R1 /* PADDR(R1) */
SUB $UREGSPACE, R1
BL saveureg(SB)
BL trap(SB)
BR restoreureg
/*
* enter with stack set and mapped.
* on return, SB (R2) has been set, and R3 has the Ureg*,
* the MMU has been re-enabled, kernel text and PC are in KSEG,
* R(MACH) has been set, and R0 contains 0.
*
*/
TEXT saveureg(SB), $-4
/*
* save state
*/
MOVMW R2, 48(R1) /* r2:r31 */
MOVW $setSB(SB), R2
RLWNM $0, R2, $~KZERO, R2 /* PADDR(setSB) */
MOVW $mach0(SB), R(MACH)
RLWNM $0, R(MACH), $~KZERO, R(MACH) /* PADDR(m->) */
MOVW 8(R(MACH)), R(USER)
MOVW $mach0(SB), R(MACH)
MOVW $setSB(SB), R2
MOVW SPR(SAVER1), R4
MOVW R4, 44(R1)
MOVW SPR(SAVER0), R5
MOVW R5, 40(R1)
MOVW CTR, R6
MOVW R6, 36(R1)
MOVW XER, R4
MOVW R4, 32(R1)
MOVW CR, R5
MOVW R5, 28(R1)
MOVW SPR(SAVELR), R6 /* LR */
MOVW R6, 24(R1)
/* pad at 20(R1) */
MOVW SPR(SRR0), R0
MOVW R0, 16(R1) /* old PC */
MOVW SPR(SRR1), R0
MOVW R0, 12(R1) /* old status */
MOVW SPR(SAVEXX), R0
MOVW R0, 8(R1) /* cause/vector */
ADD $8, R1, R3 /* Ureg* */
OR $KZERO, R3 /* fix ureg */
STWCCC R3, (R1) /* break any pending reservations */
MOVW $0, R0 /* compiler/linker expect R0 to be zero */
MOVW MSR, R5
OR $(MSR_IR|MSR_DR|MSR_FP|MSR_RI), R5 /* enable MMU */
MOVW R5, SPR(SRR1)
MOVW LR, R31
OR $KZERO, R31 /* return PC in KSEG0 */
MOVW R31, SPR(SRR0)
OR $KZERO, R1 /* fix stack pointer */
RFI /* returns to trap handler */
/*
* restore state from Ureg and return from trap/interrupt
*/
TEXT forkret(SB), $0
BR restoreureg
restoreureg:
MOVMW 48(R1), R2 /* r2:r31 */
/* defer R1 */
MOVW 40(R1), R0
MOVW R0, SPR(SAVER0)
MOVW 36(R1), R0
MOVW R0, CTR
MOVW 32(R1), R0
MOVW R0, XER
MOVW 28(R1), R0
MOVW R0, CR /* CR */
MOVW 24(R1), R0
MOVW R0, LR
/* pad, skip */
MOVW 16(R1), R0
MOVW R0, SPR(SRR0) /* old PC */
MOVW 12(R1), R0
MOVW R0, SPR(SRR1) /* old MSR */
/* cause, skip */
MOVW 44(R1), R1 /* old SP */
MOVW SPR(SAVER0), R0
RFI
TEXT fpsave(SB), $0
FMOVD F0, (0*8)(R3)
FMOVD F1, (1*8)(R3)
FMOVD F2, (2*8)(R3)
FMOVD F3, (3*8)(R3)
FMOVD F4, (4*8)(R3)
FMOVD F5, (5*8)(R3)
FMOVD F6, (6*8)(R3)
FMOVD F7, (7*8)(R3)
FMOVD F8, (8*8)(R3)
FMOVD F9, (9*8)(R3)
FMOVD F10, (10*8)(R3)
FMOVD F11, (11*8)(R3)
FMOVD F12, (12*8)(R3)
FMOVD F13, (13*8)(R3)
FMOVD F14, (14*8)(R3)
FMOVD F15, (15*8)(R3)
FMOVD F16, (16*8)(R3)
FMOVD F17, (17*8)(R3)
FMOVD F18, (18*8)(R3)
FMOVD F19, (19*8)(R3)
FMOVD F20, (20*8)(R3)
FMOVD F21, (21*8)(R3)
FMOVD F22, (22*8)(R3)
FMOVD F23, (23*8)(R3)
FMOVD F24, (24*8)(R3)
FMOVD F25, (25*8)(R3)
FMOVD F26, (26*8)(R3)
FMOVD F27, (27*8)(R3)
FMOVD F28, (28*8)(R3)
FMOVD F29, (29*8)(R3)
FMOVD F30, (30*8)(R3)
FMOVD F31, (31*8)(R3)
MOVFL FPSCR, F0
FMOVD F0, (32*8)(R3)
RETURN
TEXT fprestore(SB), $0
FMOVD (32*8)(R3), F0
MOVFL F0, FPSCR
FMOVD (0*8)(R3), F0
FMOVD (1*8)(R3), F1
FMOVD (2*8)(R3), F2
FMOVD (3*8)(R3), F3
FMOVD (4*8)(R3), F4
FMOVD (5*8)(R3), F5
FMOVD (6*8)(R3), F6
FMOVD (7*8)(R3), F7
FMOVD (8*8)(R3), F8
FMOVD (9*8)(R3), F9
FMOVD (10*8)(R3), F10
FMOVD (11*8)(R3), F11
FMOVD (12*8)(R3), F12
FMOVD (13*8)(R3), F13
FMOVD (14*8)(R3), F14
FMOVD (15*8)(R3), F15
FMOVD (16*8)(R3), F16
FMOVD (17*8)(R3), F17
FMOVD (18*8)(R3), F18
FMOVD (19*8)(R3), F19
FMOVD (20*8)(R3), F20
FMOVD (21*8)(R3), F21
FMOVD (22*8)(R3), F22
FMOVD (23*8)(R3), F23
FMOVD (24*8)(R3), F24
FMOVD (25*8)(R3), F25
FMOVD (26*8)(R3), F26
FMOVD (27*8)(R3), F27
FMOVD (28*8)(R3), F28
FMOVD (29*8)(R3), F29
FMOVD (30*8)(R3), F30
FMOVD (31*8)(R3), F31
RETURN
|
0intro/9hist
| 1,476
|
mtx/inb.s
|
#include "mem.h"
#define BDNZ BC 16,0,
#define BDNE BC 0,2,
TEXT inb(SB), $0
OR $IOMEM, R3
MOVBZ (R3), R3
RETURN
TEXT insb(SB), $0
MOVW v+4(FP), R4
MOVW n+8(FP), R5
MOVW R5, CTR
OR $IOMEM, R3
SUB $1, R4
insb1:
MOVBZ (R3), R7
MOVBU R7, 1(R4)
BDNZ insb1
RETURN
TEXT outb(SB), $0
MOVW v+4(FP), R4
OR $IOMEM, R3
EIEIO
MOVB R4, (R3)
RETURN
TEXT outsb(SB), $0
MOVW v+4(FP), R4
MOVW n+8(FP), R5
MOVW R5, CTR
OR $IOMEM, R3
SUB $1, R4
outsb1:
EIEIO
MOVBZU 1(R4), R7
MOVB R7, (R3)
BDNZ outsb1
RETURN
TEXT ins(SB), $0
OR $IOMEM, R3
EIEIO
MOVHBR (R3), R3
RETURN
TEXT inss(SB), $0
MOVW v+4(FP), R4
MOVW n+8(FP), R5
MOVW R5, CTR
OR $IOMEM, R3
SUB $2, R4
inss1:
EIEIO
MOVHZ (R3), R7
MOVHU R7, 2(R4)
BDNZ inss1
RETURN
TEXT outs(SB), $0
MOVW v+4(FP), R4
OR $IOMEM, R3
EIEIO
MOVHBR R4, (R3)
RETURN
TEXT outss(SB), $0
MOVW v+4(FP), R4
MOVW n+8(FP), R5
MOVW R5, CTR
OR $IOMEM, R3
SUB $2, R4
outss1:
EIEIO
MOVHZU 2(R4), R7
MOVH R7, (R3)
BDNZ outss1
RETURN
TEXT inl(SB), $0
OR $IOMEM, R3
EIEIO
MOVWBR (R3), R3
RETURN
TEXT insl(SB), $0
MOVW v+4(FP), R4
MOVW n+8(FP), R5
MOVW R5, CTR
OR $IOMEM, R3
SUB $4, R4
insl1:
EIEIO
MOVW (R3), R7
MOVWU R7, 4(R4)
BDNZ insl1
RETURN
TEXT outl(SB), $0
MOVW v+4(FP), R4
OR $IOMEM, R3
EIEIO
MOVWBR R4, (R3)
RETURN
TEXT outsl(SB), $0
MOVW v+4(FP), R4
MOVW n+8(FP), R5
MOVW R5, CTR
OR $IOMEM, R3
SUB $4, R4
outsl1:
EIEIO
MOVWU 4(R4), R7
MOVW R7, (R3)
BDNZ outsl1
RETURN
|
0intro/9hist
| 18,182
|
bitsy/l.s
|
#include "mem.h"
/*
* Entered here from Compaq's bootldr with MMU disabled.
*/
TEXT _start(SB), $-4
MOVW $setR12(SB), R12 /* load the SB */
_main:
/* SVC mode, interrupts disabled */
MOVW $(PsrDirq|PsrDfiq|PsrMsvc), R1
MOVW R1, CPSR
/* disable the MMU */
MOVW $0x130, R1
MCR CpMMU, 0, R1, C(CpControl), C(0x0)
/* flush caches */
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0x7), 0
/* drain prefetch */
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
/* drain write buffer */
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0xa), 4
MOVW $(MACHADDR+4*BY2PG), R13 /* stack */
SUB $4, R13 /* link */
BL main(SB)
BL exit(SB)
/* we shouldn't get here */
_mainloop:
B _mainloop
BL _div(SB) /* hack to get _div etc loaded */
/* flush tlb's */
TEXT mmuinvalidate(SB), $-4
MCR CpMMU, 0, R0, C(CpTLBFlush), C(0x7)
RET
/* flush tlb's */
TEXT mmuinvalidateaddr(SB), $-4
MCR CpMMU, 0, R0, C(CpTLBFlush), C(0x6), 1
RET
/* write back and invalidate i and d caches */
TEXT cacheflush(SB), $-4
/* splhi */
MOVW CPSR, R3
ORR $(PsrDirq), R3, R1
MOVW R1, CPSR
/* write back any dirty data */
MOVW $0xe0000000,R0
ADD $(8*1024),R0,R1
_cfloop:
MOVW.P 32(R0),R2
CMP.S R0,R1
BGE _cfloop
/* drain write buffer and invalidate i cache contents */
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0xa), 4
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0x5), 0
/* drain prefetch */
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
/* splx */
MOVW R3, CPSR
RET
/* write back d cache */
TEXT cachewb(SB), $-4
/* write back any dirty data */
_cachewb:
MOVW $0xe0000000,R0
ADD $(8*1024),R0,R1
_cwbloop:
MOVW.P 32(R0),R2
CMP.S R0,R1
BGE _cwbloop
/* drain write buffer */
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0xa), 4
RET
/* write back a single cache line */
TEXT cachewbaddr(SB), $-4
BIC $31,R0
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0xa), 1
B _wbflush
/* write back a region of cache lines */
TEXT cachewbregion(SB), $-4
MOVW 4(FP),R1
CMP.S $(4*1024),R1
BGT _cachewb
ADD R0,R1
BIC $31,R0
_cwbrloop:
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0xa), 1
ADD $32,R0
CMP.S R0,R1
BGT _cwbrloop
B _wbflush
/* invalidate the dcache */
TEXT dcacheinvalidate(SB), $-4
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0x6)
RET
/* invalidate the icache */
TEXT icacheinvalidate(SB), $-4
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0x9)
RET
/* drain write buffer */
TEXT wbflush(SB), $-4
_wbflush:
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0xa), 4
RET
/* return cpu id */
TEXT getcpuid(SB), $-4
MRC CpMMU, 0, R0, C(CpCPUID), C(0x0)
RET
/* return fault status */
TEXT getfsr(SB), $-4
MRC CpMMU, 0, R0, C(CpFSR), C(0x0)
RET
/* return mmu control register */
TEXT getcontrol(SB), $-4
SUB R0, R0
MRC CpMMU, 0, R0, C(CpControl), C(0x0)
RET
/* return mmu dac register */
TEXT getdac(SB), $-4
SUB R0, R0
MRC CpMMU, 0, R0, C(CpDAC), C(0x0)
RET
/* return mmu ttb register */
TEXT getttb(SB), $-4
SUB R0, R0
MRC CpMMU, 0, R0, C(CpTTB), C(0x0)
RET
/* return fault address */
TEXT getfar(SB), $-4
MRC CpMMU, 0, R0, C(CpFAR), C(0x0)
RET
/* set the translation table base */
TEXT putttb(SB), $-4
MCR CpMMU, 0, R0, C(CpTTB), C(0x0)
RET
/*
* enable mmu, i and d caches
*/
TEXT mmuenable(SB), $-4
MRC CpMMU, 0, R0, C(CpControl), C(0x0)
ORR $(CpCmmuena|CpCdcache|CpCicache|CpCwb), R0
MCR CpMMU, 0, R0, C(CpControl), C(0x0)
MOVW R0, R0
MOVW R0, R0
MOVW R0, R0
MOVW R0, R0
RET
TEXT mmudisable(SB), $-4
MRC CpMMU, 0, R0, C(CpControl), C(0x0)
BIC $(CpCmmuena|CpCdcache|CpCicache|CpCwb|CpCvivec), R0
MCR CpMMU, 0, R0, C(CpControl), C(0x0)
RET
/*
* use exception vectors at 0xffff0000
*/
TEXT mappedIvecEnable(SB), $-4
MRC CpMMU, 0, R0, C(CpControl), C(0x0)
ORR $(CpCvivec), R0
MCR CpMMU, 0, R0, C(CpControl), C(0x0)
RET
TEXT mappedIvecDisable(SB), $-4
MRC CpMMU, 0, R0, C(CpControl), C(0x0)
BIC $(CpCvivec), R0
MCR CpMMU, 0, R0, C(CpControl), C(0x0)
RET
/* set the translation table base */
TEXT putdac(SB), $-4
MCR CpMMU, 0, R0, C(CpDAC), C(0x0)
RET
/* set address translation pid */
TEXT putpid(SB), $-4
MCR CpMMU, 0, R0, C(CpPID), C(0x0)
RET
/*
* set the stack value for the mode passed in R0
*/
TEXT setr13(SB), $-4
MOVW 4(FP), R1
MOVW CPSR, R2
BIC $PsrMask, R2, R3
ORR R0, R3
MOVW R3, CPSR
MOVW R13, R0
MOVW R1, R13
MOVW R2, CPSR
RET
/*
* exception vectors, copied by trapinit() to somewhere useful
*/
TEXT vectors(SB), $-4
MOVW 0x18(R15), R15 /* reset */
MOVW 0x18(R15), R15 /* undefined */
MOVW 0x18(R15), R15 /* SWI */
MOVW 0x18(R15), R15 /* prefetch abort */
MOVW 0x18(R15), R15 /* data abort */
MOVW 0x18(R15), R15 /* reserved */
MOVW 0x18(R15), R15 /* IRQ */
MOVW 0x18(R15), R15 /* FIQ */
TEXT vtable(SB), $-4
WORD $_vsvc(SB) /* reset, in svc mode already */
WORD $_vund(SB) /* undefined, switch to svc mode */
WORD $_vsvc(SB) /* swi, in svc mode already */
WORD $_vpabt(SB) /* prefetch abort, switch to svc mode */
WORD $_vdabt(SB) /* data abort, switch to svc mode */
WORD $_vsvc(SB) /* reserved */
WORD $_virq(SB) /* IRQ, switch to svc mode */
WORD $_vfiq(SB) /* FIQ, switch to svc mode */
TEXT _vrst(SB), $-4
BL resettrap(SB)
TEXT _vsvc(SB), $-4 /* SWI */
MOVW.W R14, -4(R13) /* ureg->pc = interrupted PC */
MOVW SPSR, R14 /* ureg->psr = SPSR */
MOVW.W R14, -4(R13) /* ... */
MOVW $PsrMsvc, R14 /* ureg->type = PsrMsvc */
MOVW.W R14, -4(R13) /* ... */
MOVM.DB.W.S [R0-R14], (R13) /* save user level registers, at end r13 points to ureg */
MOVW $setR12(SB), R12 /* Make sure we've got the kernel's SB loaded */
MOVW R13, R0 /* first arg is pointer to ureg */
SUB $8, R13 /* space for argument+link */
BL syscall(SB)
ADD $(8+4*15), R13 /* make r13 point to ureg->type */
MOVW 8(R13), R14 /* restore link */
MOVW 4(R13), R0 /* restore SPSR */
MOVW R0, SPSR /* ... */
MOVM.DB.S (R13), [R0-R14] /* restore registers */
ADD $8, R13 /* pop past ureg->{type+psr} */
RFE /* MOVM.IA.S.W (R13), [R15] */
TEXT _vund(SB), $-4 /* undefined */
MOVM.IA [R0-R4], (R13) /* free some working space */
MOVW $PsrMund, R0
B _vswitch
TEXT _vpabt(SB), $-4 /* prefetch abort */
MOVM.IA [R0-R4], (R13) /* free some working space */
MOVW $PsrMabt, R0 /* r0 = type */
B _vswitch
TEXT _vdabt(SB), $-4 /* data abort */
MOVM.IA [R0-R4], (R13) /* free some working space */
MOVW $(PsrMabt+1), R0 /* r0 = type */
B _vswitch
TEXT _virq(SB), $-4 /* IRQ */
MOVM.IA [R0-R4], (R13) /* free some working space */
MOVW $PsrMirq, R0 /* r0 = type */
B _vswitch
/*
 * come here with the trap type in R0 and R13 pointing above the saved [r0-r4].
 * we'll switch to SVC mode and then call trap.
*/
_vswitch:
MOVW SPSR, R1 /* save SPSR for ureg */
MOVW R14, R2 /* save interrupted pc for ureg */
MOVW R13, R3 /* save pointer to where the original [R0-R3] are */
/* switch to svc mode */
MOVW CPSR, R14
BIC $PsrMask, R14
ORR $(PsrDirq|PsrDfiq|PsrMsvc), R14
MOVW R14, CPSR
/* interrupted code kernel or user? */
AND.S $0xf, R1, R4
BEQ _userexcep
/* here for trap from SVC mode */
MOVM.DB.W [R0-R2], (R13) /* set ureg->{type, psr, pc}; r13 points to ureg->type */
MOVM.IA (R3), [R0-R4] /* restore [R0-R4] from previous mode's stack */
MOVM.DB.W [R0-R14], (R13) /* save kernel level registers, at end r13 points to ureg */
MOVW $setR12(SB), R12 /* Make sure we've got the kernel's SB loaded */
MOVW R13, R0 /* first arg is pointer to ureg */
SUB $8, R13 /* space for argument+link (for debugger) */
MOVW $0xdeaddead,R11 /* marker */
BL trap(SB)
ADD $(8+4*15), R13 /* make r13 point to ureg->type */
MOVW 8(R13), R14 /* restore link */
MOVW 4(R13), R0 /* restore SPSR */
MOVW R0, SPSR /* ... */
MOVM.DB (R13), [R0-R14] /* restore registers */
ADD $8, R13 /* pop past ureg->{type+psr} */
RFE /* MOVM.IA.S.W (R13), [R15] */
/* here for trap from USER mode */
_userexcep:
MOVM.DB.W [R0-R2], (R13) /* set ureg->{type, psr, pc}; r13 points to ureg->type */
MOVM.IA (R3), [R0-R4] /* restore [R0-R4] from previous mode's stack */
MOVM.DB.W.S [R0-R14], (R13) /* save user level registers, at end r13 points to ureg */
MOVW $setR12(SB), R12 /* Make sure we've got the kernel's SB loaded */
MOVW R13, R0 /* first arg is pointer to ureg */
SUB $8, R13 /* space for argument+link (for debugger) */
BL trap(SB)
ADD $(8+4*15), R13 /* make r13 point to ureg->type */
MOVW 8(R13), R14 /* restore link */
MOVW 4(R13), R0 /* restore SPSR */
MOVW R0, SPSR /* ... */
MOVM.DB.S (R13), [R0-R14] /* restore registers */
ADD $8, R13 /* pop past ureg->{type+psr} */
RFE /* MOVM.IA.S.W (R13), [R15] */
TEXT _vfiq(SB), $-4 /* FIQ */
RFE /* FIQ is special, ignore it for now */
/*
* This is the first jump from kernel to user mode.
* Fake a return from interrupt.
*
* Enter with R0 containing the user stack pointer.
* UTZERO + 0x20 is always the entry point.
*
*/
TEXT touser(SB),$-4
/* store the user stack pointer into the USR_r13 */
MOVM.DB.W [R0], (R13)
MOVM.S.IA.W (R13),[R13]
/* set up a PSR for user level */
MOVW $(PsrMusr), R0
MOVW R0,SPSR
/* save the PC on the stack */
MOVW $(UTZERO+0x20), R0
MOVM.DB.W [R0],(R13)
/* return from interrupt */
RFE /* MOVM.IA.S.W (R13), [R15] */
/*
* here to jump to a newly forked process
*/
TEXT forkret(SB),$-4
ADD $(4*15), R13 /* make r13 point to ureg->type */
MOVW 8(R13), R14 /* restore link */
MOVW 4(R13), R0 /* restore SPSR */
MOVW R0, SPSR /* ... */
MOVM.DB.S (R13), [R0-R14] /* restore registers */
ADD $8, R13 /* pop past ureg->{type+psr} */
RFE /* MOVM.IA.S.W (R13), [R15] */
TEXT splhi(SB), $-4
/* save caller pc in Mach */
MOVW $(MACHADDR+0x04),R2
MOVW R14,0(R2)
/* turn off interrupts */
MOVW CPSR, R0
ORR $(PsrDirq), R0, R1
MOVW R1, CPSR
RET
TEXT spllo(SB), $-4
MOVW CPSR, R0
BIC $(PsrDirq), R0, R1
MOVW R1, CPSR
RET
TEXT splx(SB), $-4
/* save caller pc in Mach */
MOVW $(MACHADDR+0x04),R2
MOVW R14,0(R2)
/* reset interrupt level */
MOVW R0, R1
MOVW CPSR, R0
MOVW R1, CPSR
RET
TEXT splxpc(SB), $-4 /* for iunlock */
MOVW R0, R1
MOVW CPSR, R0
MOVW R1, CPSR
RET
TEXT spldone(SB), $0
RET
TEXT islo(SB), $-4
MOVW CPSR, R0
AND $(PsrDirq), R0
EOR $(PsrDirq), R0
RET
TEXT cpsrr(SB), $-4
MOVW CPSR, R0
RET
TEXT spsrr(SB), $-4
MOVW SPSR, R0
RET
TEXT getsp(SB), $-4
MOVW R13, R0
RET
TEXT getlink(SB), $-4
MOVW R14, R0
RET
TEXT getcallerpc(SB), $-4
MOVW 0(R13), R0
RET
TEXT tas(SB), $-4
MOVW R0, R1
MOVW $0xDEADDEAD, R0
MOVW R0, R3
SWPW R0, (R1)
CMP.S R0, R3
BEQ _tasout
EOR R3, R3
CMP.S R0, R3
BEQ _tasout
MOVW $1,R15
_tasout:
RET
TEXT setlabel(SB), $-4
MOVW R13, 0(R0) /* sp */
MOVW R14, 4(R0) /* pc */
MOVW $0, R0
RET
TEXT gotolabel(SB), $-4
MOVW 0(R0), R13 /* sp */
MOVW 4(R0), R14 /* pc */
MOVW $1, R0
RET
/* save the machine state in power_state[] for an upcoming suspend
 */
TEXT setpowerlabel(SB), $-4
MOVW $power_state+0(SB), R0
/* svc */ /* power_state[]: what */
MOVW R1, 0(R0)
MOVW R2, 4(R0)
MOVW R3, 8(R0)
MOVW R4, 12(R0)
MOVW R5, 16(R0)
MOVW R6, 20(R0)
MOVW R7, 24(R0)
MOVW R8, 28(R0)
MOVW R9, 32(R0)
MOVW R10,36(R0)
MOVW R11,40(R0)
MOVW R12,44(R0)
MOVW R13,48(R0)
MOVW R14,52(R0)
MOVW SPSR, R1
MOVW R1, 56(R0)
MOVW CPSR, R2
MOVW R2, 60(R0)
/* copro */
MRC CpMMU, 0, R3, C(CpDAC), C(0x0)
MOVW R3, 144(R0)
MRC CpMMU, 0, R3, C(CpTTB), C(0x0)
MOVW R3, 148(R0)
MRC CpMMU, 0, R3, C(CpControl), C(0x0)
MOVW R3, 152(R0)
MRC CpMMU, 0, R3, C(CpFSR), C(0x0)
MOVW R3, 156(R0)
MRC CpMMU, 0, R3, C(CpFAR), C(0x0)
MOVW R3, 160(R0)
MRC CpMMU, 0, R3, C(CpPID), C(0x0)
MOVW R3, 164(R0)
/* usr */
BIC $(PsrMask), R2, R3
ORR $(0xdf), R3
MOVW R3, CPSR
MOVW SPSR, R11
MOVW R11, 168(R0)
MOVW R12, 172(R0)
MOVW R13, 176(R0)
MOVW R14, 180(R0)
/* irq */
BIC $(PsrMask), R2, R3
ORR $(0xd2), R3
MOVW R3, CPSR
MOVW SPSR, R11
MOVW R11, 64(R0)
MOVW R12, 68(R0)
MOVW R13, 72(R0)
MOVW R14, 76(R0)
/* und */
BIC $(PsrMask), R2, R3
ORR $(0xdb), R3
MOVW R3, CPSR
MOVW SPSR, R11
MOVW R11, 80(R0)
MOVW R12, 84(R0)
MOVW R13, 88(R0)
MOVW R14, 92(R0)
/* abt */
BIC $(PsrMask), R2, R3
ORR $(0xd7), R3
MOVW R3, CPSR
MOVW SPSR, R11
MOVW R11, 96(R0)
MOVW R12, 100(R0)
MOVW R13, 104(R0)
MOVW R14, 108(R0)
/* fiq */
BIC $(PsrMask), R2, R3
ORR $(0xd1), R3
MOVW R3, CPSR
MOVW SPSR, R7
MOVW R7, 112(R0)
MOVW R8, 116(R0)
MOVW R9, 120(R0)
MOVW R10,124(R0)
MOVW R11,128(R0)
MOVW R12,132(R0)
MOVW R13,136(R0)
MOVW R14,140(R0)
/* done */
MOVW R2, CPSR
MOVW R1, SPSR
MOVW $0, R0
RET
/* Entered after a resume from suspend state.
* The bootldr jumps here after a processor reset.
*/
TEXT power_resume(SB), $-4
MOVW $setR12(SB), R12 /* load the SB */
/* SVC mode, interrupts disabled */
MOVW $(PsrDirq|PsrDfiq|PsrMsvc), R1
MOVW R1, CPSR
/* gotopowerlabel() */
/* svc */
MOVW $power_state+0(SB), R0
MOVW 56(R0), R1 /* R1: SPSR, R2: CPSR */
MOVW 60(R0), R2
MOVW R1, SPSR
MOVW R2, CPSR
/* copro */
/* flush caches */
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0x7), 0
/* drain prefetch */
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
/* drain write buffer */
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0xa), 4
MCR CpMMU, 0, R0, C(CpTLBFlush), C(0x7)
MOVW 144(R0), R3
MCR CpMMU, 0, R3, C(CpDAC), C(0x0)
MOVW 148(R0), R3
MCR CpMMU, 0, R3, C(CpTTB), C(0x0)
MOVW 156(R0), R3
MCR CpMMU, 0, R3, C(CpFSR), C(0x0)
MOVW 160(R0), R3
MCR CpMMU, 0, R3, C(CpFAR), C(0x0)
MOVW 164(R0), R3
MCR CpMMU, 0, R3, C(CpPID), C(0x0)
MOVW 152(R0), R3
MCR CpMMU, 0, R3, C(CpControl), C(0x0) /* Enable cache */
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
/* flush i&d caches */
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0x7), 0
/* flush tlb */
MCR CpMMU, 0, R0, C(CpTLBFlush), C(0x7), 0
/* drain prefetch */
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
/* usr */
BIC $(PsrMask), R2, R3
ORR $(0xdf), R3
MOVW 168(R0), R11
MOVW 172(R0), R12
MOVW 176(R0), R13
MOVW 180(R0), R14
MOVW R11, SPSR
/* irq */
BIC $(PsrMask), R2, R3
ORR $(0xd2), R3
MOVW R3, CPSR
MOVW 64(R0), R11
MOVW 68(R0), R12
MOVW 72(R0), R13
MOVW 76(R0), R14
MOVW R11, SPSR
/* und */
BIC $(PsrMask), R2, R3
ORR $(0xdb), R3
MOVW R3, CPSR
MOVW 80(R0), R11
MOVW 84(R0), R12
MOVW 88(R0), R13
MOVW 92(R0), R14
MOVW R11, SPSR
/* abt */
BIC $(PsrMask), R2, R3
ORR $(0xd7), R3
MOVW R3, CPSR
MOVW 96(R0), R11
MOVW 100(R0), R12
MOVW 104(R0), R13
MOVW 108(R0), R14
MOVW R11, SPSR
/* fiq */
BIC $(PsrMask), R2, R3
ORR $(0xd1), R3
MOVW R3, CPSR
MOVW 112(R0), R7
MOVW 116(R0), R8
MOVW 120(R0), R9
MOVW 124(R0), R10
MOVW 128(R0), R11
MOVW 132(R0), R12
MOVW 136(R0), R13
MOVW 140(R0), R14
MOVW R7, SPSR
/* svc */
MOVW 56(R0), R1
MOVW 60(R0), R2
MOVW R1, SPSR
MOVW R2, CPSR
MOVW 0(R0), R1
MOVW 4(R0), R2
MOVW 8(R0), R3
MOVW 12(R0),R4
MOVW 16(R0),R5
MOVW 20(R0),R6
MOVW 24(R0),R7
MOVW 28(R0),R8
MOVW 32(R0),R9
MOVW 36(R0),R10
MOVW 40(R0),R11
MOVW 44(R0),R12
MOVW 48(R0),R13
MOVW 52(R0),R14
RET
loop:
B loop
TEXT power_down(SB), $-4
TEXT sa1100_power_off<>+0(SB),$8
MOVW resetregs+0(SB),R7
MOVW gpioregs+0(SB),R6
MOVW memconfregs+0(SB),R5
MOVW powerregs+0(SB),R3
/* wakeup on power | rtc */
MOVW $(PWR_rtc|PWR_gpio0),R2
MOVW R2,0xc(R3)
/* clear reset status */
MOVW $(RCSR_all), R2
MOVW R2, 0x4(R7)
/* float */
MOVW $(PCFR_opde|PCFR_fp|PCFR_fs), R2
MOVW R2,0x10(R3)
/* sleep state */
MOVW $0,R2
MOVW R2,0x18(R3)
/* set resume address (pspr)*/
MOVW $resumeaddr+0(SB),R1
MOVW 0x0(R1), R2
MOVW R2,0x8(R3)
BL cacheflush(SB)
/* disable clock switching */
MCR CpPWR, 0, R1, C(CpTest), C(0x2), 2
/* adjust mem timing */
MOVW memconfregs+0(SB),R5
MOVW 0x1c(R5), R2
ORR $(MDREFR_k1db2), R2
MOVW R2, 0x1c(R5)
/* set PLL to lower speed w/ delay (ppcr = 0)*/
MOVW powerregs+0(SB),R3
MOVW $(120*206),R0
l11: SUB $1,R0
BGT l11
MOVW $0, R2
MOVW R2, 0x14(R3)
MOVW $(120*206),R0
l12: SUB $1,R0
BGT l12
/* setup registers for suspend procedure:
* 1. clear RT in mscx (R1, R7, R8)
* 2. clear DRI in mdrefr (R4)
* 3. set slfrsh in mdrefr (R6)
* 4. clear DE in mdcnfg (R9)
* 5. clear dram refresh (R10)
* 6. force sleep (R2)
*/
/* 1 */
MOVW 0x10(R5), R2
BIC $(MSC_rt), R2
MOVW R2, R1
MOVW 0x14(R5), R2
BIC $(MSC_rt), R2
MOVW R2, R7
MOVW 0x2c(R5), R2
BIC $(MSC_rt), R2
MOVW R2, R8
/* 2 */
MOVW 0x1c(R5), R2
BIC $(0xff00), R2
BIC $(0x00f0), R2
MOVW R2, R4
/* 3 */
ORR $(MDREFR_slfrsh), R2, R6
/* 4 */
MOVW 0x0(R5), R9
BIC $(MDCFNG_de), R9, R9
/* 5 */
MOVW R4, R2
BIC $(MDREFR_slfrsh), R2, R2
BIC $(MDREFR_e1pin), R2, R2
MOVW R2, R10
/* 6 */
MOVW $1,R2
TEXT power_magic(SB), $-4
/* power_code gets copied into the area of no-ops below,
* at a cache-line boundary (8 instructions)
*/
MOVW R0, R0
MOVW R0, R0
MOVW R0, R0
MOVW R0, R0
MOVW R0, R0
MOVW R0, R0
MOVW R0, R0
MOVW R0, R0
MOVW R0, R0
MOVW R0, R0
MOVW R0, R0
MOVW R0, R0
MOVW R0, R0
MOVW R0, R0
MOVW R0, R0
MOVW R0, R0
TEXT power_code(SB), $-4
/* Follow the procedure; this code gets copied to the no-op
* area preceding this code
*/
/* 1 */
MOVW R1, 0x10(R5)
MOVW R7, 0x14(R5)
MOVW R8, 0x2c(R5)
/* 2 */
MOVW R4, 0x1c(R5)
/* 3 */
MOVW R6, 0x1c(R5)
/* 4 */
MOVW R9, 0x0(R5)
/* 5 */
MOVW R10, 0x1c(R5)
/* 6 */
MOVW R2, 0x0(R3)
slloop:
B slloop /* loop waiting for sleep */
/* The first MCR instruction of this function needs to be on a cache-line
* boundary; to make this happen, it will be copied to the first cache-line
* boundary 8 words from the start of doze.
*
* Doze puts the machine into idle mode. Any interrupt will get it out
* at the next instruction (the RET, to be precise).
*/
TEXT doze(SB), $-4
MOVW $UCDRAMZERO, R1
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
RET
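/* doze_code below holds the instruction sequence referred to above, copied
 * into doze's no-op pad so that its first MCR lands on a cache-line boundary:
 * disable clock switching, do an uncached load through R1 (UCDRAMZERO), then
 * issue the MCR that drops the processor into idle mode.
 */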
TEXT doze_code(SB), $-4
MCR CpPWR, 0, R0, C(CpTest), C(0x2), 2
MOVW (R1), R0
MCR CpPWR, 0, R0, C(CpTest), C(0x8), 2
==== 0intro/9hist | bitsy/bitsyreset.s | 2,349 bytes ====
#include "mem.h"
// Bitsy development board uses two banks: KM416S4030C,
// 12 row address bits, 8 col address bits
// Bitsy uses two banks KM416S8030C, 12 row address bits,
// 9 col address bits
// Have to set DRAC0 to 14 row bits or else you only get 8 col bits
// from the formfactor unit configuration registers: 0xF3536257
mdcnfg: // DRAM Configuration Register 10.2.1
WORD 1<<0 | 1<<2 | 0<<3 | 0x5<<4 | 0x3<<8 | 3<<12 | 3<<14
mdrefr0: // DRAM Refresh Control Register 10.2.2
WORD 1<<0 | 0x200<<4 | 1<<21 | 1<<22 | 1 <<31
mdrefr1: // DRAM Refresh Control Register 10.2.2
WORD 1<<0 | 0x200<<4 | 1<<21 | 1<<22
mdrefr2: // DRAM Refresh Control Register 10.2.2
WORD 1<<0 | 0x200<<4 | 1<<20 | 1<<21 | 1<<22
/* MDCAS settings from [1] Table 10-3 (page 10-18) */
waveform0:
WORD 0xAAAAAAA7
waveform1:
WORD 0xAAAAAAAA
waveform2:
WORD 0xAAAAAAAA
delay: // delay without using memory
mov $100, r1 // 200MHz: 100 × (2 instructions @ 5 ns) == 1 µs
l1:
sub $1, r1
bgt l1
sub $1, r0
bgt delay
ret
reset:
mov $INTREGS+4, r0 // turn off interrupts
mov $0, (r0)
// Is this necessary on wakeup?
mov $POWERREGS+14, r0 // set clock speed to 191.7MHz
mov $0xb, (r0)
// This is necessary on hard reset, but not on sleep reset
mov $0x80, r0 // wait ±128 µs
bl delay
/* check to see if we're operating out of DRAM */
bic $0x000000ff, pc, r4
bic $0x0000ff00, r4
bic $0x00ff0000, r4
cmp r4, $PHYSDRAM0
beq dram
dramwakeup:
mov $POWERREGS+0x4, r1 // Clear DH in Power Manager Sleep Status Register
bic $(1<<3), (r1) // DH == DRAM Hold
// This releases nCAS/DQM and nRAS/nSDCS pins to make DRAM exit selfrefresh
/* Set up the DRAM in banks 0 and 1 [1] 10.3 */
mov $MEMCONFREGS, r1
mov mdrefr0, r2 // Turn on K1RUN
mov r2, 0x1c(r1)
mov mdrefr1, r2 // Turn off SLFRSH
mov r2, 0x1c(r1)
mov mdrefr2, r2 // Turn on E1PIN
mov r2, 0x1c(r1)
mov waveform0, r2
mov r2, 0x4(r1)
mov waveform1, r2
mov r2, 0x8(r1)
mov waveform2, r2
mov r2, 0xc(r1)
mov $PHYSDRAM0, r0
mov 0x00(r0), r2 // Eight non-burst read cycles
mov 0x20(r0), r2
mov 0x40(r0), r2
mov 0x60(r0), r2
mov 0x80(r0), r2
mov 0xa0(r0), r2
mov 0xc0(r0), r2
mov 0xe0(r0), r2
mov mdcnfg, r2 // Enable memory banks
mov r2, 0x0(r1)
// Is there any use in turning on EAPD and KAPD in the MDREFR register?
ret
dram:
==== 0intro/9hist | pc/ptclbsum386.s | 1,493 bytes ====
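/*
 * ptclbsum computes the 16-bit ones-complement byte sum used for the Internet
 * (IP/TCP/UDP) checksums. The data is accumulated 32, 8 and 2 bytes at a time
 * with carries folded back in via ADCL, the high half of the sum is then
 * folded into the low half, and the two result bytes are swapped or left
 * alone depending on whether the buffer started on an odd address, so that
 * the byte pairing matches network byte order.
 */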
TEXT ptclbsum(SB), $0
MOVL addr+0(FP), SI
MOVL len+4(FP), CX
XORL AX, AX /* sum */
TESTL $1, SI /* byte aligned? */
MOVL SI, DI
JEQ _2align
DECL CX
JLT _return
MOVB 0x00(SI), AH
INCL SI
_2align:
TESTL $2, SI /* word aligned? */
JEQ _32loop
CMPL CX, $2 /* less than 2 bytes? */
JLT _1dreg
SUBL $2, CX
XORL BX, BX
MOVW 0x00(SI), BX
ADDL BX, AX
ADCL $0, AX
LEAL 2(SI), SI
_32loop:
CMPL CX, $0x20
JLT _8loop
MOVL CX, BP
SHRL $5, BP
ANDL $0x1F, CX
_32loopx:
MOVL 0x00(SI), BX
MOVL 0x1C(SI), DX
ADCL BX, AX
MOVL 0x04(SI), BX
ADCL DX, AX
MOVL 0x10(SI), DX
ADCL BX, AX
MOVL 0x08(SI), BX
ADCL DX, AX
MOVL 0x14(SI), DX
ADCL BX, AX
MOVL 0x0C(SI), BX
ADCL DX, AX
MOVL 0x18(SI), DX
ADCL BX, AX
LEAL 0x20(SI), SI
ADCL DX, AX
DECL BP
JNE _32loopx
ADCL $0, AX
_8loop:
CMPL CX, $0x08
JLT _2loop
MOVL CX, BP
SHRL $3, BP
ANDL $0x07, CX
_8loopx:
MOVL 0x00(SI), BX
ADCL BX, AX
MOVL 0x04(SI), DX
ADCL DX, AX
LEAL 0x08(SI), SI
DECL BP
JNE _8loopx
ADCL $0, AX
_2loop:
CMPL CX, $0x02
JLT _1dreg
MOVL CX, BP
SHRL $1, BP
ANDL $0x01, CX
_2loopx:
MOVWLZX 0x00(SI), BX
ADCL BX, AX
LEAL 0x02(SI), SI
DECL BP
JNE _2loopx
ADCL $0, AX
_1dreg:
TESTL $1, CX /* 1 byte left? */
JEQ _fold
XORL BX, BX
MOVB 0x00(SI), BX
ADDL BX, AX
ADCL $0, AX
_fold:
MOVL AX, BX
SHRL $16, BX
JEQ _swab
ANDL $0xFFFF, AX
ADDL BX, AX
JMP _fold
_swab:
TESTL $1, addr+0(FP)
/*TESTL $1, DI*/
JNE _return
XCHGB AH, AL
_return:
RET
==== 0intro/9hist | pc/apmjump.s | 1,527 bytes ====
/*
* Far call, absolute indirect.
* The argument is the offset.
* We use a global structure for the jump params,
* so this is *not* reentrant or thread safe.
*/
#include "mem.h"
#define SSOVERRIDE BYTE $0x36
#define CSOVERRIDE BYTE $0x2E
#define RETF BYTE $0xCB
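/*
 * The far call in apmfarcall below is hand-assembled: CSOVERRIDE; BYTE $0xFF;
 * BYTE $0x1D; LONG $apmjumpstruct encodes "CALL FAR [apmjumpstruct]", an
 * indirect call through the 16:32 pointer stored there (offset at +0,
 * segment selector at +4).
 */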
GLOBL apmjumpstruct+0(SB), $8
TEXT fortytwo(SB), $0
MOVL $42, AX
RETF
TEXT getcs(SB), $0
PUSHL CS
POPL AX
RET
TEXT apmfarcall(SB), $0
/*
* We call push and pop ourselves.
* As soon as we do the first push or pop,
* we can't use FP anymore.
*/
MOVL off+4(FP), BX
MOVL seg+0(FP), CX
MOVL BX, apmjumpstruct+0(SB)
MOVL CX, apmjumpstruct+4(SB)
/* load necessary registers from Ureg */
MOVL ureg+8(FP), DI
MOVL 28(DI), AX
MOVL 16(DI), BX
MOVL 24(DI), CX
MOVL 20(DI), DX
/* save registers, segments */
PUSHL DS
PUSHL ES
PUSHL FS
PUSHL GS
PUSHL BP
PUSHL DI
/*
* paranoia: zero the segments, since it's the
* BIOS's responsibility to initialize them.
* (trick picked up from Linux driver).
PUSHL DX
XORL DX, DX
PUSHL DX
POPL DS
PUSHL DX
POPL ES
PUSHL DX
POPL FS
PUSHL DX
POPL GS
POPL DX
*/
PUSHL $APMDSEG
POPL DS
/*
* The actual call.
*/
CSOVERRIDE; BYTE $0xFF; BYTE $0x1D
LONG $apmjumpstruct+0(SB)
/* restore segments, registers */
POPL DI
POPL BP
POPL GS
POPL FS
POPL ES
POPL DS
PUSHFL
POPL 64(DI)
/* store interesting registers back in Ureg */
MOVL AX, 28(DI)
MOVL BX, 16(DI)
MOVL CX, 24(DI)
MOVL DX, 20(DI)
MOVL SI, 4(DI)
PUSHFL
POPL AX
ANDL $1, AX /* carry flag */
RET
==== 0intro/9hist | pc/l.s | 20,868 bytes ====
#include "mem.h"
#define PADDR(a) ((a) & ~KZERO)
#define KADDR(a) (KZERO|(a))
/*
* Some machine instructions not handled by 8[al].
*/
#define OP16 BYTE $0x66
#define DELAY BYTE $0xEB; BYTE $0x00 /* JMP .+2 */
#define CPUID BYTE $0x0F; BYTE $0xA2 /* CPUID, argument in AX */
#define WRMSR BYTE $0x0F; BYTE $0x30 /* WRMSR, argument in AX/DX (lo/hi) */
#define RDMSR BYTE $0x0F; BYTE $0x32 /* RDMSR, result in AX/DX (lo/hi) */
#define RDTSC BYTE $0x0F; BYTE $0x31
#define WBINVD BYTE $0x0F; BYTE $0x09
#define HLT BYTE $0xF4
#define SFENCE BYTE $0x0F; BYTE $0xAE; BYTE $0xF8
/*
* Macros for calculating offsets within the page directory base
* and page tables. Note that these are assembler-specific hence
* the '<<2'.
*/
#define PDO(a) (((((a))>>22) & 0x03FF)<<2)
#define PTO(a) (((((a))>>12) & 0x03FF)<<2)
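/*
 * Example: with KZERO = 0x80000000 (the kernel virtual base used below),
 * PDO(KZERO) = ((0x80000000>>22) & 0x3FF) << 2 = 0x800, the byte offset of
 * page-directory entry 512; PTO(va) likewise gives the byte offset of the
 * page-table entry covering virtual address va within its 4MB page table.
 */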
/*
 * For backwards compatibility with 9load - should go away when 9load is changed.
 * 9load currently sets up the mmu; however, the first 16MB of memory is identity
 * mapped, so behave as if the mmu was not set up.
*/
TEXT _start0x80100020(SB),$0
MOVL $_start0x00100020(SB), AX
ANDL $~KZERO, AX
JMP* AX
/*
* In protected mode with paging turned off and segment registers setup to linear map all memory.
* Entered via a jump to 0x00100020, the physical address of the virtual kernel entry point of 0x80100020
* Make the basic page tables for processor 0. Four pages are needed for the basic set:
* a page directory, a page table for mapping the first 4MB of physical memory to KZERO,
* and virtual and physical pages for mapping the Mach structure.
* The remaining PTEs will be allocated later when memory is sized.
* An identity mmu map is also needed for the switch to virtual mode. This
* identity mapping is removed once the MMU is going and the JMP has been made
* to virtual memory.
*/
TEXT _start0x00100020(SB),$0
CLI /* make sure interrupts are off */
MOVL $PADDR(CPU0PDB), DI /* clear 4 pages for the tables etc. */
XORL AX, AX
MOVL $(4*BY2PG), CX
SHRL $2, CX
CLD
REP; STOSL
MOVL $PADDR(CPU0PDB), AX
ADDL $PDO(KZERO), AX /* page directory offset for KZERO */
MOVL $PADDR(CPU0PTE), (AX) /* PTE's for 0x80000000 */
MOVL $(PTEWRITE|PTEVALID), BX /* page permissions */
ORL BX, (AX)
MOVL $PADDR(CPU0PTE), AX /* first page of page table */
MOVL $1024, CX /* 1024 pages in 4MB */
_setpte:
MOVL BX, (AX)
ADDL $(1<<PGSHIFT), BX
ADDL $4, AX
LOOP _setpte
MOVL $PADDR(CPU0PTE), AX
ADDL $PTO(MACHADDR), AX /* page table entry offset for MACHADDR */
MOVL $PADDR(CPU0MACH), (AX) /* PTE for Mach */
MOVL $(PTEWRITE|PTEVALID), BX /* page permissions */
ORL BX, (AX)
/*
* Now ready to use the new map. Make sure the processor options are what is wanted.
* It is necessary on some processors to immediately follow mode switching with a JMP instruction
* to clear the prefetch queues.
*/
MOVL $PADDR(CPU0PDB), CX /* load address of page directory */
MOVL (PDO(KZERO))(CX), DX /* double-map KZERO at 0 */
MOVL DX, (PDO(0))(CX)
MOVL CX, CR3
DELAY /* JMP .+2 */
MOVL CR0, DX
ORL $0x80010000, DX /* PG|WP */
ANDL $~0x6000000A, DX /* ~(CD|NW|TS|MP) */
MOVL $_startpg(SB), AX /* this is a virtual address */
MOVL DX, CR0 /* turn on paging */
JMP* AX /* jump to the virtual nirvana */
/*
* Basic machine environment set, can clear BSS and create a stack.
* The stack starts at the top of the page containing the Mach structure.
* The x86 architecture forces the use of the same virtual address for
* each processor's Mach structure, so the global Mach pointer 'm' can
* be initialised here.
*/
TEXT _startpg(SB), $0
MOVL $0, (PDO(0))(CX) /* undo double-map of KZERO at 0 */
MOVL CX, CR3 /* load and flush the mmu */
_clearbss:
MOVL $edata(SB), DI
XORL AX, AX
MOVL $end(SB), CX
SUBL DI, CX /* end-edata bytes */
SHRL $2, CX /* end-edata doublewords */
CLD
REP; STOSL /* clear BSS */
MOVL $MACHADDR, SP
MOVL SP, m(SB) /* initialise global Mach pointer */
MOVL $0, 0(SP) /* initialise m->machno */
ADDL $(MACHSIZE-4), SP /* initialise stack */
/*
* Need to do one final thing to ensure a clean machine environment,
* clear the EFLAGS register, which can only be done once there is a stack.
*/
MOVL $0, AX
PUSHL AX
POPFL
CALL main(SB)
/*
* Park a processor. Should never fall through a return from main to here,
* should only be called by application processors when shutting down.
*/
TEXT idle(SB), $0
_idle:
STI
HLT
JMP _idle
/*
* Port I/O.
* in[bsl] input a byte|short|long
* ins[bsl] input a string of bytes|shorts|longs
* out[bsl] output a byte|short|long
* outs[bsl] output a string of bytes|shorts|longs
*/
TEXT inb(SB), $0
MOVL port+0(FP), DX
XORL AX, AX
INB
RET
TEXT insb(SB), $0
MOVL port+0(FP), DX
MOVL address+4(FP), DI
MOVL count+8(FP), CX
CLD
REP; INSB
RET
TEXT ins(SB), $0
MOVL port+0(FP), DX
XORL AX, AX
OP16; INL
RET
TEXT inss(SB), $0
MOVL port+0(FP), DX
MOVL address+4(FP), DI
MOVL count+8(FP), CX
CLD
REP; OP16; INSL
RET
TEXT inl(SB), $0
MOVL port+0(FP), DX
INL
RET
TEXT insl(SB), $0
MOVL port+0(FP), DX
MOVL address+4(FP), DI
MOVL count+8(FP), CX
CLD
REP; INSL
RET
TEXT outb(SB), $0
MOVL port+0(FP), DX
MOVL byte+4(FP), AX
OUTB
RET
TEXT outsb(SB),$0
MOVL port+0(FP), DX
MOVL address+4(FP), SI
MOVL count+8(FP), CX
CLD
REP; OUTSB
RET
TEXT outs(SB), $0
MOVL port+0(FP), DX
MOVL short+4(FP), AX
OP16; OUTL
RET
TEXT outss(SB), $0
MOVL port+0(FP), DX
MOVL address+4(FP), SI
MOVL count+8(FP), CX
CLD
REP; OP16; OUTSL
RET
TEXT outl(SB), $0
MOVL port+0(FP), DX
MOVL long+4(FP), AX
OUTL
RET
TEXT outsl(SB), $0
MOVL port+0(FP), DX
MOVL address+4(FP), SI
MOVL count+8(FP), CX
CLD
REP; OUTSL
RET
/*
* Read/write various system registers.
* CR4 and the 'model specific registers' should only be read/written
* after it has been determined the processor supports them
*/
TEXT lgdt(SB), $0 /* GDTR - global descriptor table */
MOVL gdtptr+0(FP), AX
MOVL (AX), GDTR
RET
TEXT lidt(SB), $0 /* IDTR - interrupt descriptor table */
MOVL idtptr+0(FP), AX
MOVL (AX), IDTR
RET
TEXT ltr(SB), $0 /* TR - task register */
MOVL tptr+0(FP), AX
MOVW AX, TASK
RET
TEXT getcr0(SB), $0 /* CR0 - processor control */
MOVL CR0, AX
RET
TEXT getcr2(SB), $0 /* CR2 - page fault linear address */
MOVL CR2, AX
RET
TEXT getcr3(SB), $0 /* CR3 - page directory base */
MOVL CR3, AX
RET
TEXT putcr3(SB), $0
MOVL cr3+0(FP), AX
MOVL AX, CR3
RET
TEXT getcr4(SB), $0 /* CR4 - extensions */
MOVL CR4, AX
RET
TEXT putcr4(SB), $0
MOVL cr4+0(FP), AX
MOVL AX, CR4
RET
TEXT rdtsc(SB), $0 /* time stamp counter; cycles since power up */
RDTSC
MOVL vlong+0(FP), CX /* &vlong */
MOVL AX, 0(CX) /* lo */
MOVL DX, 4(CX) /* hi */
RET
TEXT rdmsr(SB), $0 /* model-specific register */
MOVL index+0(FP), CX
RDMSR
MOVL vlong+4(FP), CX /* &vlong */
MOVL AX, 0(CX) /* lo */
MOVL DX, 4(CX) /* hi */
RET
TEXT wrmsr(SB), $0
MOVL index+0(FP), CX
MOVL lo+4(FP), AX
MOVL hi+8(FP), DX
WRMSR
RET
TEXT wbinvd(SB), $0
WBINVD
RET
TEXT sfence(SB), $0
SFENCE
RET
/*
* Try to determine the CPU type which requires fiddling with EFLAGS.
 * If the Id bit can be toggled then the CPUID instruction can be used
* to determine CPU identity and features. First have to check if it's
* a 386 (Ac bit can't be set). If it's not a 386 and the Id bit can't be
* toggled then it's an older 486 of some kind.
*
* cpuid(id[], &ax, &dx);
*/
TEXT cpuid(SB), $0
MOVL $0x240000, AX
PUSHL AX
POPFL /* set Id|Ac */
PUSHFL
POPL BX /* retrieve value */
MOVL $0, AX
PUSHL AX
POPFL /* clear Id|Ac, EFLAGS initialised */
PUSHFL
POPL AX /* retrieve value */
XORL BX, AX
TESTL $0x040000, AX /* Ac */
JZ _cpu386 /* can't set this bit on 386 */
TESTL $0x200000, AX /* Id */
JZ _cpu486 /* can't toggle this bit on some 486 */
MOVL $0, AX
CPUID
MOVL id+0(FP), BP
MOVL BX, 0(BP) /* "Genu" "Auth" "Cyri" */
MOVL DX, 4(BP) /* "ineI" "enti" "xIns" */
MOVL CX, 8(BP) /* "ntel" "cAMD" "tead" */
MOVL $1, AX
CPUID
JMP _cpuid
_cpu486:
MOVL $0x400, AX
MOVL $0, DX
JMP _cpuid
_cpu386:
MOVL $0x300, AX
MOVL $0, DX
_cpuid:
MOVL ax+4(FP), BP
MOVL AX, 0(BP)
MOVL dx+8(FP), BP
MOVL DX, 0(BP)
RET
/*
* Basic timing loop to determine CPU frequency.
*/
TEXT aamloop(SB), $0
MOVL count+0(FP), CX
_aamloop:
AAM
LOOP _aamloop
RET
/*
* Floating point.
*/
#define FPOFF ;\
WAIT ;\
MOVL CR0, AX ;\
ANDL $~0x4, AX /* EM=0 */ ;\
ORL $0x28, AX /* NE=1, TS=1 */ ;\
MOVL AX, CR0
#define FPON ;\
MOVL CR0, AX ;\
ANDL $~0xC, AX /* EM=0, TS=0 */ ;\
MOVL AX, CR0
TEXT fpoff(SB), $0 /* disable */
FPOFF
RET
TEXT fpinit(SB), $0 /* enable and init */
FPON
FINIT
WAIT
/* setfcr(FPPDBL|FPRNR|FPINVAL|FPZDIV|FPOVFL) */
/* note that low 6 bits are masks, not enables, on this chip */
PUSHW $0x0232
FLDCW 0(SP)
POPW AX
WAIT
RET
TEXT fpsave(SB), $0 /* save state and disable */
MOVL p+0(FP), AX
FSAVE 0(AX) /* no WAIT */
FPOFF
RET
TEXT fprestore(SB), $0 /* enable and restore state */
FPON
MOVL p+0(FP), AX
FRSTOR 0(AX)
WAIT
RET
TEXT fpstatus(SB), $0 /* get floating point status */
FSTSW AX
RET
TEXT fpenv(SB), $0 /* save state without waiting */
MOVL p+0(FP), AX
FSTENV 0(AX)
RET
/*
*/
TEXT splhi(SB), $0
MOVL $(MACHADDR+0x04), AX /* save PC in m->splpc */
MOVL (SP), BX
MOVL BX, (AX)
PUSHFL
POPL AX
CLI
RET
TEXT spllo(SB), $0
PUSHFL
POPL AX
STI
RET
TEXT splx(SB), $0
MOVL $(MACHADDR+0x04), AX /* save PC in m->splpc */
MOVL (SP), BX
MOVL BX, (AX)
TEXT splxpc(SB), $0 /* for iunlock */
MOVL s+0(FP), AX
PUSHL AX
POPFL
RET
TEXT spldone(SB), $0
RET
TEXT islo(SB), $0
PUSHFL
POPL AX
ANDL $0x200, AX /* interrupt enable flag */
RET
/*
* Test-And-Set
*/
TEXT tas(SB), $0
MOVL $0xDEADDEAD, AX
MOVL lock+0(FP), BX
XCHGL AX, (BX) /* lock->key */
RET
TEXT wbflush(SB), $0
CPUID
RET
TEXT xchgw(SB), $0
MOVL v+4(FP), AX
MOVL p+0(FP), BX
XCHGW AX, (BX)
RET
/*
TEXT xchgl(SB), $0
MOVL v+4(FP), AX
MOVL p+0(FP), BX
XCHGL AX, (BX)
RET
*/
/*
* label consists of a stack pointer and a PC
*/
TEXT gotolabel(SB), $0
MOVL label+0(FP), AX
MOVL 0(AX), SP /* restore sp */
MOVL 4(AX), AX /* put return pc on the stack */
MOVL AX, 0(SP)
MOVL $1, AX /* return 1 */
RET
TEXT setlabel(SB), $0
MOVL label+0(FP), AX
MOVL SP, 0(AX) /* store sp */
MOVL 0(SP), BX /* store return pc */
MOVL BX, 4(AX)
MOVL $0, AX /* return 0 */
RET
/*
* Attempt at power saving. -rsc
*/
TEXT halt(SB), $0
CLI
CMPL nrdy(SB), $0
JEQ _nothingready
STI
RET
_nothingready:
STI
HLT
RET
/*
* Interrupt/exception handling.
* Each entry in the vector table calls either _strayintr or _strayintrx depending
* on whether an error code has been automatically pushed onto the stack
* (_strayintrx) or not, in which case a dummy entry must be pushed before retrieving
* the trap type from the vector table entry and placing it on the stack as part
* of the Ureg structure.
* The size of each entry in the vector table (6 bytes) is known in trapinit().
*/
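/*
 * In the table below, vectors 0x08, 0x0A-0x0E and 0x11 (double fault, invalid
 * TSS, segment, stack, general protection, page fault, alignment check) use
 * _strayintrx because the processor pushes an error code for them; all other
 * vectors go through _strayintr, which arranges a dummy error-code slot so the
 * resulting Ureg layout is identical.
 */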
TEXT _strayintr(SB), $0
PUSHL AX /* save AX */
MOVL 4(SP), AX /* return PC from vectortable(SB) */
JMP intrcommon
TEXT _strayintrx(SB), $0
XCHGL AX, (SP) /* swap AX with vectortable CALL PC */
intrcommon:
PUSHL DS /* save DS */
PUSHL $(KDSEL)
POPL DS /* fix up DS */
MOVBLZX (AX), AX /* trap type -> AX */
XCHGL AX, 4(SP) /* exchange trap type with saved AX */
PUSHL ES /* save ES */
PUSHL $(KDSEL)
POPL ES /* fix up ES */
PUSHL FS /* save the rest of the Ureg struct */
PUSHL GS
PUSHAL
PUSHL SP /* Ureg* argument to trap */
CALL trap(SB)
TEXT forkret(SB), $0
POPL AX
POPAL
POPL GS
POPL FS
POPL ES
POPL DS
ADDL $8, SP /* pop error code and trap type */
IRETL
TEXT vectortable(SB), $0
CALL _strayintr(SB); BYTE $0x00 /* divide error */
CALL _strayintr(SB); BYTE $0x01 /* debug exception */
CALL _strayintr(SB); BYTE $0x02 /* NMI interrupt */
CALL _strayintr(SB); BYTE $0x03 /* breakpoint */
CALL _strayintr(SB); BYTE $0x04 /* overflow */
CALL _strayintr(SB); BYTE $0x05 /* bound */
CALL _strayintr(SB); BYTE $0x06 /* invalid opcode */
CALL _strayintr(SB); BYTE $0x07 /* no coprocessor available */
CALL _strayintrx(SB); BYTE $0x08 /* double fault */
CALL _strayintr(SB); BYTE $0x09 /* coprocessor segment overflow */
CALL _strayintrx(SB); BYTE $0x0A /* invalid TSS */
CALL _strayintrx(SB); BYTE $0x0B /* segment not available */
CALL _strayintrx(SB); BYTE $0x0C /* stack exception */
CALL _strayintrx(SB); BYTE $0x0D /* general protection error */
CALL _strayintrx(SB); BYTE $0x0E /* page fault */
CALL _strayintr(SB); BYTE $0x0F /* */
CALL _strayintr(SB); BYTE $0x10 /* coprocessor error */
CALL _strayintrx(SB); BYTE $0x11 /* alignment check */
CALL _strayintr(SB); BYTE $0x12 /* machine check */
CALL _strayintr(SB); BYTE $0x13
CALL _strayintr(SB); BYTE $0x14
CALL _strayintr(SB); BYTE $0x15
CALL _strayintr(SB); BYTE $0x16
CALL _strayintr(SB); BYTE $0x17
CALL _strayintr(SB); BYTE $0x18
CALL _strayintr(SB); BYTE $0x19
CALL _strayintr(SB); BYTE $0x1A
CALL _strayintr(SB); BYTE $0x1B
CALL _strayintr(SB); BYTE $0x1C
CALL _strayintr(SB); BYTE $0x1D
CALL _strayintr(SB); BYTE $0x1E
CALL _strayintr(SB); BYTE $0x1F
CALL _strayintr(SB); BYTE $0x20 /* VectorLAPIC */
CALL _strayintr(SB); BYTE $0x21
CALL _strayintr(SB); BYTE $0x22
CALL _strayintr(SB); BYTE $0x23
CALL _strayintr(SB); BYTE $0x24
CALL _strayintr(SB); BYTE $0x25
CALL _strayintr(SB); BYTE $0x26
CALL _strayintr(SB); BYTE $0x27
CALL _strayintr(SB); BYTE $0x28
CALL _strayintr(SB); BYTE $0x29
CALL _strayintr(SB); BYTE $0x2A
CALL _strayintr(SB); BYTE $0x2B
CALL _strayintr(SB); BYTE $0x2C
CALL _strayintr(SB); BYTE $0x2D
CALL _strayintr(SB); BYTE $0x2E
CALL _strayintr(SB); BYTE $0x2F
CALL _strayintr(SB); BYTE $0x30
CALL _strayintr(SB); BYTE $0x31
CALL _strayintr(SB); BYTE $0x32
CALL _strayintr(SB); BYTE $0x33
CALL _strayintr(SB); BYTE $0x34
CALL _strayintr(SB); BYTE $0x35
CALL _strayintr(SB); BYTE $0x36
CALL _strayintr(SB); BYTE $0x37
CALL _strayintr(SB); BYTE $0x38
CALL _strayintr(SB); BYTE $0x39
CALL _strayintr(SB); BYTE $0x3A
CALL _strayintr(SB); BYTE $0x3B
CALL _strayintr(SB); BYTE $0x3C
CALL _strayintr(SB); BYTE $0x3D
CALL _strayintr(SB); BYTE $0x3E
CALL _strayintr(SB); BYTE $0x3F
CALL _syscallintr(SB); BYTE $0x40 /* VectorSYSCALL */
CALL _strayintr(SB); BYTE $0x41
CALL _strayintr(SB); BYTE $0x42
CALL _strayintr(SB); BYTE $0x43
CALL _strayintr(SB); BYTE $0x44
CALL _strayintr(SB); BYTE $0x45
CALL _strayintr(SB); BYTE $0x46
CALL _strayintr(SB); BYTE $0x47
CALL _strayintr(SB); BYTE $0x48
CALL _strayintr(SB); BYTE $0x49
CALL _strayintr(SB); BYTE $0x4A
CALL _strayintr(SB); BYTE $0x4B
CALL _strayintr(SB); BYTE $0x4C
CALL _strayintr(SB); BYTE $0x4D
CALL _strayintr(SB); BYTE $0x4E
CALL _strayintr(SB); BYTE $0x4F
CALL _strayintr(SB); BYTE $0x50
CALL _strayintr(SB); BYTE $0x51
CALL _strayintr(SB); BYTE $0x52
CALL _strayintr(SB); BYTE $0x53
CALL _strayintr(SB); BYTE $0x54
CALL _strayintr(SB); BYTE $0x55
CALL _strayintr(SB); BYTE $0x56
CALL _strayintr(SB); BYTE $0x57
CALL _strayintr(SB); BYTE $0x58
CALL _strayintr(SB); BYTE $0x59
CALL _strayintr(SB); BYTE $0x5A
CALL _strayintr(SB); BYTE $0x5B
CALL _strayintr(SB); BYTE $0x5C
CALL _strayintr(SB); BYTE $0x5D
CALL _strayintr(SB); BYTE $0x5E
CALL _strayintr(SB); BYTE $0x5F
CALL _strayintr(SB); BYTE $0x60
CALL _strayintr(SB); BYTE $0x61
CALL _strayintr(SB); BYTE $0x62
CALL _strayintr(SB); BYTE $0x63
CALL _strayintr(SB); BYTE $0x64
CALL _strayintr(SB); BYTE $0x65
CALL _strayintr(SB); BYTE $0x66
CALL _strayintr(SB); BYTE $0x67
CALL _strayintr(SB); BYTE $0x68
CALL _strayintr(SB); BYTE $0x69
CALL _strayintr(SB); BYTE $0x6A
CALL _strayintr(SB); BYTE $0x6B
CALL _strayintr(SB); BYTE $0x6C
CALL _strayintr(SB); BYTE $0x6D
CALL _strayintr(SB); BYTE $0x6E
CALL _strayintr(SB); BYTE $0x6F
CALL _strayintr(SB); BYTE $0x70
CALL _strayintr(SB); BYTE $0x71
CALL _strayintr(SB); BYTE $0x72
CALL _strayintr(SB); BYTE $0x73
CALL _strayintr(SB); BYTE $0x74
CALL _strayintr(SB); BYTE $0x75
CALL _strayintr(SB); BYTE $0x76
CALL _strayintr(SB); BYTE $0x77
CALL _strayintr(SB); BYTE $0x78
CALL _strayintr(SB); BYTE $0x79
CALL _strayintr(SB); BYTE $0x7A
CALL _strayintr(SB); BYTE $0x7B
CALL _strayintr(SB); BYTE $0x7C
CALL _strayintr(SB); BYTE $0x7D
CALL _strayintr(SB); BYTE $0x7E
CALL _strayintr(SB); BYTE $0x7F
CALL _strayintr(SB); BYTE $0x80 /* Vector[A]PIC */
CALL _strayintr(SB); BYTE $0x81
CALL _strayintr(SB); BYTE $0x82
CALL _strayintr(SB); BYTE $0x83
CALL _strayintr(SB); BYTE $0x84
CALL _strayintr(SB); BYTE $0x85
CALL _strayintr(SB); BYTE $0x86
CALL _strayintr(SB); BYTE $0x87
CALL _strayintr(SB); BYTE $0x88
CALL _strayintr(SB); BYTE $0x89
CALL _strayintr(SB); BYTE $0x8A
CALL _strayintr(SB); BYTE $0x8B
CALL _strayintr(SB); BYTE $0x8C
CALL _strayintr(SB); BYTE $0x8D
CALL _strayintr(SB); BYTE $0x8E
CALL _strayintr(SB); BYTE $0x8F
CALL _strayintr(SB); BYTE $0x90
CALL _strayintr(SB); BYTE $0x91
CALL _strayintr(SB); BYTE $0x92
CALL _strayintr(SB); BYTE $0x93
CALL _strayintr(SB); BYTE $0x94
CALL _strayintr(SB); BYTE $0x95
CALL _strayintr(SB); BYTE $0x96
CALL _strayintr(SB); BYTE $0x97
CALL _strayintr(SB); BYTE $0x98
CALL _strayintr(SB); BYTE $0x99
CALL _strayintr(SB); BYTE $0x9A
CALL _strayintr(SB); BYTE $0x9B
CALL _strayintr(SB); BYTE $0x9C
CALL _strayintr(SB); BYTE $0x9D
CALL _strayintr(SB); BYTE $0x9E
CALL _strayintr(SB); BYTE $0x9F
CALL _strayintr(SB); BYTE $0xA0
CALL _strayintr(SB); BYTE $0xA1
CALL _strayintr(SB); BYTE $0xA2
CALL _strayintr(SB); BYTE $0xA3
CALL _strayintr(SB); BYTE $0xA4
CALL _strayintr(SB); BYTE $0xA5
CALL _strayintr(SB); BYTE $0xA6
CALL _strayintr(SB); BYTE $0xA7
CALL _strayintr(SB); BYTE $0xA8
CALL _strayintr(SB); BYTE $0xA9
CALL _strayintr(SB); BYTE $0xAA
CALL _strayintr(SB); BYTE $0xAB
CALL _strayintr(SB); BYTE $0xAC
CALL _strayintr(SB); BYTE $0xAD
CALL _strayintr(SB); BYTE $0xAE
CALL _strayintr(SB); BYTE $0xAF
CALL _strayintr(SB); BYTE $0xB0
CALL _strayintr(SB); BYTE $0xB1
CALL _strayintr(SB); BYTE $0xB2
CALL _strayintr(SB); BYTE $0xB3
CALL _strayintr(SB); BYTE $0xB4
CALL _strayintr(SB); BYTE $0xB5
CALL _strayintr(SB); BYTE $0xB6
CALL _strayintr(SB); BYTE $0xB7
CALL _strayintr(SB); BYTE $0xB8
CALL _strayintr(SB); BYTE $0xB9
CALL _strayintr(SB); BYTE $0xBA
CALL _strayintr(SB); BYTE $0xBB
CALL _strayintr(SB); BYTE $0xBC
CALL _strayintr(SB); BYTE $0xBD
CALL _strayintr(SB); BYTE $0xBE
CALL _strayintr(SB); BYTE $0xBF
CALL _strayintr(SB); BYTE $0xC0
CALL _strayintr(SB); BYTE $0xC1
CALL _strayintr(SB); BYTE $0xC2
CALL _strayintr(SB); BYTE $0xC3
CALL _strayintr(SB); BYTE $0xC4
CALL _strayintr(SB); BYTE $0xC5
CALL _strayintr(SB); BYTE $0xC6
CALL _strayintr(SB); BYTE $0xC7
CALL _strayintr(SB); BYTE $0xC8
CALL _strayintr(SB); BYTE $0xC9
CALL _strayintr(SB); BYTE $0xCA
CALL _strayintr(SB); BYTE $0xCB
CALL _strayintr(SB); BYTE $0xCC
CALL _strayintr(SB); BYTE $0xCD
CALL _strayintr(SB); BYTE $0xCE
CALL _strayintr(SB); BYTE $0xCF
CALL _strayintr(SB); BYTE $0xD0
CALL _strayintr(SB); BYTE $0xD1
CALL _strayintr(SB); BYTE $0xD2
CALL _strayintr(SB); BYTE $0xD3
CALL _strayintr(SB); BYTE $0xD4
CALL _strayintr(SB); BYTE $0xD5
CALL _strayintr(SB); BYTE $0xD6
CALL _strayintr(SB); BYTE $0xD7
CALL _strayintr(SB); BYTE $0xD8
CALL _strayintr(SB); BYTE $0xD9
CALL _strayintr(SB); BYTE $0xDA
CALL _strayintr(SB); BYTE $0xDB
CALL _strayintr(SB); BYTE $0xDC
CALL _strayintr(SB); BYTE $0xDD
CALL _strayintr(SB); BYTE $0xDE
CALL _strayintr(SB); BYTE $0xDF
CALL _strayintr(SB); BYTE $0xE0
CALL _strayintr(SB); BYTE $0xE1
CALL _strayintr(SB); BYTE $0xE2
CALL _strayintr(SB); BYTE $0xE3
CALL _strayintr(SB); BYTE $0xE4
CALL _strayintr(SB); BYTE $0xE5
CALL _strayintr(SB); BYTE $0xE6
CALL _strayintr(SB); BYTE $0xE7
CALL _strayintr(SB); BYTE $0xE8
CALL _strayintr(SB); BYTE $0xE9
CALL _strayintr(SB); BYTE $0xEA
CALL _strayintr(SB); BYTE $0xEB
CALL _strayintr(SB); BYTE $0xEC
CALL _strayintr(SB); BYTE $0xED
CALL _strayintr(SB); BYTE $0xEE
CALL _strayintr(SB); BYTE $0xEF
CALL _strayintr(SB); BYTE $0xF0
CALL _strayintr(SB); BYTE $0xF1
CALL _strayintr(SB); BYTE $0xF2
CALL _strayintr(SB); BYTE $0xF3
CALL _strayintr(SB); BYTE $0xF4
CALL _strayintr(SB); BYTE $0xF5
CALL _strayintr(SB); BYTE $0xF6
CALL _strayintr(SB); BYTE $0xF7
CALL _strayintr(SB); BYTE $0xF8
CALL _strayintr(SB); BYTE $0xF9
CALL _strayintr(SB); BYTE $0xFA
CALL _strayintr(SB); BYTE $0xFB
CALL _strayintr(SB); BYTE $0xFC
CALL _strayintr(SB); BYTE $0xFD
CALL _strayintr(SB); BYTE $0xFE
CALL _strayintr(SB); BYTE $0xFF
==== 0intro/9hist | pc/apbootstrap.s | 3,037 bytes ====
#include "mem.h"
#define NOP BYTE $0x90 /* NOP */
#define LGDT(gdtptr) BYTE $0x0F; /* LGDT */ \
BYTE $0x01; BYTE $0x16; \
WORD $gdtptr
#define FARJUMP16(s, o) BYTE $0xEA; /* far jump to ptr16:16 */ \
WORD $o; WORD $s; \
NOP; NOP; NOP
#define FARJUMP32(s, o) BYTE $0x66; /* far jump to ptr32:16 */ \
BYTE $0xEA; LONG $o; WORD $s
#define DELAY BYTE $0xEB; /* JMP .+2 */ \
BYTE $0x00
#define INVD BYTE $0x0F; BYTE $0x08
#define WBINVD BYTE $0x0F; BYTE $0x09
/*
* Macros for calculating offsets within the page directory base
* and page tables. Note that these are assembler-specific hence
* the '<<2'.
*/
#define PDO(a) (((((a))>>22) & 0x03FF)<<2)
#define PTO(a) (((((a))>>12) & 0x03FF)<<2)
/*
* Start an Application Processor. This must be placed on a 4KB boundary
* somewhere in the 1st MB of conventional memory (APBOOTSTRAP). However,
* due to some shortcuts below it's restricted further to within the 1st
* 64KB. The AP starts in real-mode, with
* CS selector set to the startup memory address/16;
* CS base set to startup memory address;
* CS limit set to 64KB;
* CPL and IP set to 0.
*/
TEXT apbootstrap(SB), $0
FARJUMP16(0, _apbootstrap(SB))
TEXT _apvector(SB), $0 /* address APBOOTSTRAP+0x08 */
LONG $0
TEXT _appdb(SB), $0 /* address APBOOTSTRAP+0x0C */
LONG $0
TEXT _apapic(SB), $0 /* address APBOOTSTRAP+0x10 */
LONG $0
TEXT _apbootstrap(SB), $0 /* address APBOOTSTRAP+0x14 */
MOVW CS, AX
MOVW AX, DS /* initialise DS */
LGDT(gdtptr(SB)) /* load a basic gdt */
MOVL CR0, AX
ORL $1, AX
MOVL AX, CR0 /* turn on protected mode */
DELAY /* JMP .+2 */
BYTE $0xB8; WORD $SELECTOR(1, SELGDT, 0)/* MOVW $SELECTOR(1, SELGDT, 0), AX */
MOVW AX, DS
MOVW AX, ES
MOVW AX, FS
MOVW AX, GS
MOVW AX, SS
FARJUMP32(SELECTOR(2, SELGDT, 0), _ap32-KZERO(SB))
/*
* For Pentiums and higher, the code that enables paging must come from
* pages that are identity mapped.
* To this end double map KZERO at virtual 0 and undo the mapping once virtual
* nirvana has been obtained.
*/
TEXT _ap32(SB), $0
MOVL _appdb-KZERO(SB), CX /* physical address of PDB */
MOVL (PDO(KZERO))(CX), DX /* double-map KZERO at 0 */
MOVL DX, (PDO(0))(CX)
MOVL CX, CR3 /* load and flush the mmu */
MOVL CR0, DX
ORL $0x80010000, DX /* PG|WP */
ANDL $~0x6000000A, DX /* ~(CD|NW|TS|MP) */
MOVL $_appg(SB), AX
MOVL DX, CR0 /* turn on paging */
JMP* AX
TEXT _appg(SB), $0
MOVL CX, AX /* physical address of PDB */
ORL $KZERO, AX
MOVL $0, (PDO(0))(AX) /* undo double-map of KZERO at 0 */
MOVL CX, CR3 /* load and flush the mmu */
MOVL $(MACHADDR+MACHSIZE-4), SP
MOVL $0, AX
PUSHL AX
POPFL
MOVL _apapic(SB), AX
MOVL AX, (SP)
MOVL _apvector(SB), AX
CALL* AX
_aphalt:
HLT
JMP _aphalt
TEXT gdt(SB), $0
LONG $0x0000; LONG $0
LONG $0xFFFF; LONG $(SEGG|SEGB|(0xF<<16)|SEGP|SEGPL(0)|SEGDATA|SEGW)
LONG $0xFFFF; LONG $(SEGG|SEGD|(0xF<<16)|SEGP|SEGPL(0)|SEGEXEC|SEGR)
TEXT gdtptr(SB), $0
WORD $(3*8-1)
LONG $gdt-KZERO(SB)
==== 0intro/libtask | asm.S | 5,926 bytes ====
/* Copyright (c) 2005-2006 Russ Cox, MIT; see COPYRIGHT */
#if defined(__FreeBSD__) && defined(__i386__) && __FreeBSD__ < 5
#define NEEDX86CONTEXT 1
#define SET setmcontext
#define GET getmcontext
#endif
#if defined(__OpenBSD__) && defined(__i386__)
#define NEEDX86CONTEXT 1
#define SET setmcontext
#define GET getmcontext
#endif
#if defined(__APPLE__)
#if defined(__i386__)
#define NEEDX86CONTEXT 1
#define SET _setmcontext
#define GET _getmcontext
#elif defined(__x86_64__)
#define NEEDAMD64CONTEXT 1
#define SET _setmcontext
#define GET _getmcontext
#else
#define NEEDPOWERCONTEXT 1
#define SET __setmcontext
#define GET __getmcontext
#endif
#endif
#if defined(__linux__) && defined(__amd64__)
#define NEEDAMD64CONTEXT 1
#define SET setmcontext
#define GET getmcontext
#endif
#if defined(__linux__) && defined(__arm__)
#define NEEDARMCONTEXT 1
#define SET setmcontext
#define GET getmcontext
#endif
#if defined(__linux__) && defined(__mips__)
#define NEEDMIPSCONTEXT 1
#define SET setmcontext
#define GET getmcontext
#endif
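/*
 * Each port below behaves like setjmp/longjmp over that platform's mcontext
 * layout: GET saves the callee-visible state, plants a 1 in the saved
 * return-value slot (on most ports), and itself returns 0; a later SET
 * restores the context, so control resumes at the original GET call site
 * with a return value of 1.
 */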
#ifdef NEEDX86CONTEXT
.globl SET
SET:
movl 4(%esp), %eax
movl 8(%eax), %fs
movl 12(%eax), %es
movl 16(%eax), %ds
movl 76(%eax), %ss
movl 20(%eax), %edi
movl 24(%eax), %esi
movl 28(%eax), %ebp
movl 36(%eax), %ebx
movl 40(%eax), %edx
movl 44(%eax), %ecx
movl 72(%eax), %esp
pushl 60(%eax) /* new %eip */
movl 48(%eax), %eax
ret
.globl GET
GET:
movl 4(%esp), %eax
movl %fs, 8(%eax)
movl %es, 12(%eax)
movl %ds, 16(%eax)
movl %ss, 76(%eax)
movl %edi, 20(%eax)
movl %esi, 24(%eax)
movl %ebp, 28(%eax)
movl %ebx, 36(%eax)
movl %edx, 40(%eax)
movl %ecx, 44(%eax)
movl $1, 48(%eax) /* %eax */
movl (%esp), %ecx /* %eip */
movl %ecx, 60(%eax)
leal 4(%esp), %ecx /* %esp */
movl %ecx, 72(%eax)
movl 44(%eax), %ecx /* restore %ecx */
movl $0, %eax
ret
#endif
#ifdef NEEDAMD64CONTEXT
.globl SET
SET:
movq 16(%rdi), %rsi
movq 24(%rdi), %rdx
movq 32(%rdi), %rcx
movq 40(%rdi), %r8
movq 48(%rdi), %r9
movq 56(%rdi), %rax
movq 64(%rdi), %rbx
movq 72(%rdi), %rbp
movq 80(%rdi), %r10
movq 88(%rdi), %r11
movq 96(%rdi), %r12
movq 104(%rdi), %r13
movq 112(%rdi), %r14
movq 120(%rdi), %r15
movq 184(%rdi), %rsp
pushq 160(%rdi) /* new %eip */
movq 8(%rdi), %rdi
ret
.globl GET
GET:
movq %rdi, 8(%rdi)
movq %rsi, 16(%rdi)
movq %rdx, 24(%rdi)
movq %rcx, 32(%rdi)
movq %r8, 40(%rdi)
movq %r9, 48(%rdi)
movq $1, 56(%rdi) /* %rax */
movq %rbx, 64(%rdi)
movq %rbp, 72(%rdi)
movq %r10, 80(%rdi)
movq %r11, 88(%rdi)
movq %r12, 96(%rdi)
movq %r13, 104(%rdi)
movq %r14, 112(%rdi)
movq %r15, 120(%rdi)
movq (%rsp), %rcx /* %rip */
movq %rcx, 160(%rdi)
leaq 8(%rsp), %rcx /* %rsp */
movq %rcx, 184(%rdi)
movq 32(%rdi), %rcx /* restore %rcx */
movq $0, %rax
ret
#endif
#ifdef NEEDPOWERCONTEXT
/* get FPR and VR use flags with sc 0x7FF3 */
/* get vsave with mfspr reg, 256 */
.text
.align 2
.globl GET
GET: /* xxx: instruction scheduling */
mflr r0
mfcr r5
mfctr r6
mfxer r7
stw r0, 0*4(r3)
stw r5, 1*4(r3)
stw r6, 2*4(r3)
stw r7, 3*4(r3)
stw r1, 4*4(r3)
stw r2, 5*4(r3)
li r5, 1 /* return value for setmcontext */
stw r5, 6*4(r3)
stw r13, (0+7)*4(r3) /* callee-save GPRs */
stw r14, (1+7)*4(r3) /* xxx: block move */
stw r15, (2+7)*4(r3)
stw r16, (3+7)*4(r3)
stw r17, (4+7)*4(r3)
stw r18, (5+7)*4(r3)
stw r19, (6+7)*4(r3)
stw r20, (7+7)*4(r3)
stw r21, (8+7)*4(r3)
stw r22, (9+7)*4(r3)
stw r23, (10+7)*4(r3)
stw r24, (11+7)*4(r3)
stw r25, (12+7)*4(r3)
stw r26, (13+7)*4(r3)
stw r27, (14+7)*4(r3)
stw r28, (15+7)*4(r3)
stw r29, (16+7)*4(r3)
stw r30, (17+7)*4(r3)
stw r31, (18+7)*4(r3)
li r3, 0 /* return */
blr
.globl SET
SET:
lwz r13, (0+7)*4(r3) /* callee-save GPRs */
lwz r14, (1+7)*4(r3) /* xxx: block move */
lwz r15, (2+7)*4(r3)
lwz r16, (3+7)*4(r3)
lwz r17, (4+7)*4(r3)
lwz r18, (5+7)*4(r3)
lwz r19, (6+7)*4(r3)
lwz r20, (7+7)*4(r3)
lwz r21, (8+7)*4(r3)
lwz r22, (9+7)*4(r3)
lwz r23, (10+7)*4(r3)
lwz r24, (11+7)*4(r3)
lwz r25, (12+7)*4(r3)
lwz r26, (13+7)*4(r3)
lwz r27, (14+7)*4(r3)
lwz r28, (15+7)*4(r3)
lwz r29, (16+7)*4(r3)
lwz r30, (17+7)*4(r3)
lwz r31, (18+7)*4(r3)
lwz r1, 4*4(r3)
lwz r2, 5*4(r3)
lwz r0, 0*4(r3)
mtlr r0
lwz r0, 1*4(r3)
mtcr r0 /* mtcrf 0xFF, r0 */
lwz r0, 2*4(r3)
mtctr r0
lwz r0, 3*4(r3)
mtxer r0
lwz r3, 6*4(r3)
blr
#endif
#ifdef NEEDARMCONTEXT
.globl GET
GET:
str r1, [r0,#4]
str r2, [r0,#8]
str r3, [r0,#12]
str r4, [r0,#16]
str r5, [r0,#20]
str r6, [r0,#24]
str r7, [r0,#28]
str r8, [r0,#32]
str r9, [r0,#36]
str r10, [r0,#40]
str r11, [r0,#44]
str r12, [r0,#48]
str r13, [r0,#52]
str r14, [r0,#56]
/* store 1 as r0-to-restore */
mov r1, #1
str r1, [r0]
/* return 0 */
mov r0, #0
mov pc, lr
.globl SET
SET:
ldr r1, [r0,#4]
ldr r2, [r0,#8]
ldr r3, [r0,#12]
ldr r4, [r0,#16]
ldr r5, [r0,#20]
ldr r6, [r0,#24]
ldr r7, [r0,#28]
ldr r8, [r0,#32]
ldr r9, [r0,#36]
ldr r10, [r0,#40]
ldr r11, [r0,#44]
ldr r12, [r0,#48]
ldr r13, [r0,#52]
ldr r14, [r0,#56]
ldr r0, [r0]
mov pc, lr
#endif
#ifdef NEEDMIPSCONTEXT
.globl GET
GET:
sw $4, 24($4)
sw $5, 28($4)
sw $6, 32($4)
sw $7, 36($4)
sw $16, 72($4)
sw $17, 76($4)
sw $18, 80($4)
sw $19, 84($4)
sw $20, 88($4)
sw $21, 92($4)
sw $22, 96($4)
sw $23, 100($4)
sw $28, 120($4) /* gp */
sw $29, 124($4) /* sp */
sw $30, 128($4) /* fp */
sw $31, 132($4) /* ra */
xor $2, $2, $2
j $31
nop
.globl SET
SET:
lw $16, 72($4)
lw $17, 76($4)
lw $18, 80($4)
lw $19, 84($4)
lw $20, 88($4)
lw $21, 92($4)
lw $22, 96($4)
lw $23, 100($4)
lw $28, 120($4) /* gp */
lw $29, 124($4) /* sp */
lw $30, 128($4) /* fp */
/*
* If we set $31 directly and j $31,
 * we would lose the outer return address.
* Use a temporary register, then.
*/
lw $8, 132($4) /* ra */
/* bug: not setting the pc causes a bus error */
lw $25, 132($4) /* pc */
lw $5, 28($4)
lw $6, 32($4)
lw $7, 36($4)
lw $4, 24($4)
j $8
nop
#endif
==== 0Nera/BMOSP | kernel/cpu/idt_stubs.s | 22,513 bytes ====
.text
.code64
.global isr_stubs
.extern isr_generic
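/*
 * Common interrupt path: every entryN stub below pushes an error code (a
 * literal 0 for vectors where the CPU does not supply one) and then its
 * vector number before jumping here. "common" saves the general registers,
 * passes the frame to isr_generic in %rdi, restores them, and pops the 16
 * bytes of vector/error code (addq $136 = 120 saved bytes + 16) before iretq.
 */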
common:
.align 16
subq $120, %rsp
movq %rbp, 0(%rsp)
movq %rbx, 8(%rsp)
movq %r15, 16(%rsp)
movq %r14, 24(%rsp)
movq %r13, 32(%rsp)
movq %r12, 40(%rsp)
movq %r11, 48(%rsp)
movq %r10, 56(%rsp)
movq %r9, 64(%rsp)
movq %r8, 72(%rsp)
movq %rax, 80(%rsp)
movq %rcx, 88(%rsp)
movq %rdx, 96(%rsp)
movq %rsi, 104(%rsp)
movq %rdi, 112(%rsp)
cld
movq %rsp, %rdi
call isr_generic
movq 0(%rsp), %rbp
movq 8(%rsp), %rbx
movq 16(%rsp), %r15
movq 24(%rsp), %r14
movq 32(%rsp), %r13
movq 40(%rsp), %r12
movq 48(%rsp), %r11
movq 56(%rsp), %r10
movq 64(%rsp), %r9
movq 72(%rsp), %r8
movq 80(%rsp), %rax
movq 88(%rsp), %rcx
movq 96(%rsp), %rdx
movq 104(%rsp), %rsi
movq 112(%rsp), %rdi
addq $136, %rsp
iretq
entry0:
pushq $0
pushq $0
jmp common
.align 16
entry1:
pushq $0
pushq $1
jmp common
.align 16
entry2:
pushq $0
pushq $2
jmp common
.align 16
entry3:
pushq $0
pushq $3
jmp common
.align 16
entry4:
pushq $0
pushq $4
jmp common
.align 16
entry5:
pushq $0
pushq $5
jmp common
.align 16
entry6:
pushq $0
pushq $6
jmp common
.align 16
entry7:
pushq $0
pushq $7
jmp common
.align 16
entry8:
pushq $8
jmp common
.align 16
entry9:
pushq $0
pushq $9
jmp common
.align 16
entry10:
pushq $10
jmp common
.align 16
entry11:
pushq $11
jmp common
.align 16
entry12:
pushq $12
jmp common
.align 16
entry13:
pushq $13
jmp common
.align 16
entry14:
pushq $14
jmp common
.align 16
entry15:
pushq $0
pushq $15
jmp common
.align 16
entry16:
pushq $0
pushq $16
jmp common
.align 16
entry17:
pushq $17
jmp common
.align 16
entry18:
pushq $0
pushq $18
jmp common
.align 16
entry19:
pushq $0
pushq $19
jmp common
.align 16
entry20:
pushq $0
pushq $20
jmp common
.align 16
entry21:
pushq $0
pushq $21
jmp common
.align 16
entry22:
pushq $0
pushq $22
jmp common
.align 16
entry23:
pushq $0
pushq $23
jmp common
.align 16
entry24:
pushq $0
pushq $24
jmp common
.align 16
entry25:
pushq $0
pushq $25
jmp common
.align 16
entry26:
pushq $0
pushq $26
jmp common
.align 16
entry27:
pushq $0
pushq $27
jmp common
.align 16
entry28:
pushq $0
pushq $28
jmp common
.align 16
entry29:
pushq $0
pushq $29
jmp common
.align 16
entry30:
pushq $0
pushq $30
jmp common
.align 16
entry31:
pushq $0
pushq $31
jmp common
.align 16
entry32:
pushq $0
pushq $32
jmp common
.align 16
entry33:
pushq $0
pushq $33
jmp common
.align 16
entry34:
pushq $0
pushq $34
jmp common
.align 16
entry35:
pushq $0
pushq $35
jmp common
.align 16
entry36:
pushq $0
pushq $36
jmp common
.align 16
entry37:
pushq $0
pushq $37
jmp common
.align 16
entry38:
pushq $0
pushq $38
jmp common
.align 16
entry39:
pushq $0
pushq $39
jmp common
.align 16
entry40:
pushq $0
pushq $40
jmp common
.align 16
entry41:
pushq $0
pushq $41
jmp common
.align 16
entry42:
pushq $0
pushq $42
jmp common
.align 16
entry43:
pushq $0
pushq $43
jmp common
.align 16
entry44:
pushq $0
pushq $44
jmp common
.align 16
entry45:
pushq $0
pushq $45
jmp common
.align 16
entry46:
pushq $0
pushq $46
jmp common
.align 16
entry47:
pushq $0
pushq $47
jmp common
.align 16
entry48:
pushq $0
pushq $48
jmp common
.align 16
entry49:
pushq $0
pushq $49
jmp common
.align 16
entry50:
pushq $0
pushq $50
jmp common
.align 16
entry51:
pushq $0
pushq $51
jmp common
.align 16
entry52:
pushq $0
pushq $52
jmp common
.align 16
entry53:
pushq $0
pushq $53
jmp common
.align 16
entry54:
pushq $0
pushq $54
jmp common
.align 16
entry55:
pushq $0
pushq $55
jmp common
.align 16
entry56:
pushq $0
pushq $56
jmp common
.align 16
entry57:
pushq $0
pushq $57
jmp common
.align 16
entry58:
pushq $0
pushq $58
jmp common
.align 16
entry59:
pushq $0
pushq $59
jmp common
.align 16
entry60:
pushq $0
pushq $60
jmp common
.align 16
entry61:
pushq $0
pushq $61
jmp common
.align 16
entry62:
pushq $0
pushq $62
jmp common
.align 16
entry63:
pushq $0
pushq $63
jmp common
.align 16
entry64:
pushq $0
pushq $64
jmp common
.align 16
entry65:
pushq $0
pushq $65
jmp common
.align 16
entry66:
pushq $0
pushq $66
jmp common
.align 16
entry67:
pushq $0
pushq $67
jmp common
.align 16
entry68:
pushq $0
pushq $68
jmp common
.align 16
entry69:
pushq $0
pushq $69
jmp common
.align 16
entry70:
pushq $0
pushq $70
jmp common
.align 16
entry71:
pushq $0
pushq $71
jmp common
.align 16
entry72:
pushq $0
pushq $72
jmp common
.align 16
entry73:
pushq $0
pushq $73
jmp common
.align 16
entry74:
pushq $0
pushq $74
jmp common
.align 16
entry75:
pushq $0
pushq $75
jmp common
.align 16
entry76:
pushq $0
pushq $76
jmp common
.align 16
entry77:
pushq $0
pushq $77
jmp common
.align 16
entry78:
pushq $0
pushq $78
jmp common
.align 16
entry79:
pushq $0
pushq $79
jmp common
.align 16
entry80:
pushq $0
pushq $80
jmp common
.align 16
entry81:
pushq $0
pushq $81
jmp common
.align 16
entry82:
pushq $0
pushq $82
jmp common
.align 16
entry83:
pushq $0
pushq $83
jmp common
.align 16
entry84:
pushq $0
pushq $84
jmp common
.align 16
entry85:
pushq $0
pushq $85
jmp common
.align 16
entry86:
pushq $0
pushq $86
jmp common
.align 16
entry87:
pushq $0
pushq $87
jmp common
.align 16
entry88:
pushq $0
pushq $88
jmp common
.align 16
entry89:
pushq $0
pushq $89
jmp common
.align 16
entry90:
pushq $0
pushq $90
jmp common
.align 16
entry91:
pushq $0
pushq $91
jmp common
.align 16
entry92:
pushq $0
pushq $92
jmp common
.align 16
entry93:
pushq $0
pushq $93
jmp common
.align 16
entry94:
pushq $0
pushq $94
jmp common
.align 16
entry95:
pushq $0
pushq $95
jmp common
.align 16
entry96:
pushq $0
pushq $96
jmp common
.align 16
entry97:
pushq $0
pushq $97
jmp common
.align 16
entry98:
pushq $0
pushq $98
jmp common
.align 16
entry99:
pushq $0
pushq $99
jmp common
.align 16
entry100:
pushq $0
pushq $100
jmp common
.align 16
entry101:
pushq $0
pushq $101
jmp common
.align 16
entry102:
pushq $0
pushq $102
jmp common
.align 16
entry103:
pushq $0
pushq $103
jmp common
.align 16
entry104:
pushq $0
pushq $104
jmp common
.align 16
entry105:
pushq $0
pushq $105
jmp common
.align 16
entry106:
pushq $0
pushq $106
jmp common
.align 16
entry107:
pushq $0
pushq $107
jmp common
.align 16
entry108:
pushq $0
pushq $108
jmp common
.align 16
entry109:
pushq $0
pushq $109
jmp common
.align 16
entry110:
pushq $0
pushq $110
jmp common
.align 16
entry111:
pushq $0
pushq $111
jmp common
.align 16
entry112:
pushq $0
pushq $112
jmp common
.align 16
entry113:
pushq $0
pushq $113
jmp common
.align 16
entry114:
pushq $0
pushq $114
jmp common
.align 16
entry115:
pushq $0
pushq $115
jmp common
.align 16
entry116:
pushq $0
pushq $116
jmp common
.align 16
entry117:
pushq $0
pushq $117
jmp common
.align 16
entry118:
pushq $0
pushq $118
jmp common
.align 16
entry119:
pushq $0
pushq $119
jmp common
.align 16
entry120:
pushq $0
pushq $120
jmp common
.align 16
entry121:
pushq $0
pushq $121
jmp common
.align 16
entry122:
pushq $0
pushq $122
jmp common
.align 16
entry123:
pushq $0
pushq $123
jmp common
.align 16
entry124:
pushq $0
pushq $124
jmp common
.align 16
entry125:
pushq $0
pushq $125
jmp common
.align 16
entry126:
pushq $0
pushq $126
jmp common
.align 16
entry127:
pushq $0
pushq $127
jmp common
.align 16
entry128:
pushq $0
pushq $128
jmp common
.align 16
entry129:
pushq $0
pushq $129
jmp common
.align 16
entry130:
pushq $0
pushq $130
jmp common
.align 16
entry131:
pushq $0
pushq $131
jmp common
.align 16
entry132:
pushq $0
pushq $132
jmp common
.align 16
entry133:
pushq $0
pushq $133
jmp common
.align 16
entry134:
pushq $0
pushq $134
jmp common
.align 16
entry135:
pushq $0
pushq $135
jmp common
.align 16
entry136:
pushq $0
pushq $136
jmp common
.align 16
entry137:
pushq $0
pushq $137
jmp common
.align 16
entry138:
pushq $0
pushq $138
jmp common
.align 16
entry139:
pushq $0
pushq $139
jmp common
.align 16
entry140:
pushq $0
pushq $140
jmp common
.align 16
entry141:
pushq $0
pushq $141
jmp common
.align 16
entry142:
pushq $0
pushq $142
jmp common
.align 16
entry143:
pushq $0
pushq $143
jmp common
.align 16
entry144:
pushq $0
pushq $144
jmp common
.align 16
entry145:
pushq $0
pushq $145
jmp common
.align 16
entry146:
pushq $0
pushq $146
jmp common
.align 16
entry147:
pushq $0
pushq $147
jmp common
.align 16
entry148:
pushq $0
pushq $148
jmp common
.align 16
entry149:
pushq $0
pushq $149
jmp common
.align 16
entry150:
pushq $0
pushq $150
jmp common
.align 16
entry151:
pushq $0
pushq $151
jmp common
.align 16
entry152:
pushq $0
pushq $152
jmp common
.align 16
entry153:
pushq $0
pushq $153
jmp common
.align 16
entry154:
pushq $0
pushq $154
jmp common
.align 16
entry155:
pushq $0
pushq $155
jmp common
.align 16
entry156:
pushq $0
pushq $156
jmp common
.align 16
entry157:
pushq $0
pushq $157
jmp common
.align 16
entry158:
pushq $0
pushq $158
jmp common
.align 16
entry159:
pushq $0
pushq $159
jmp common
.align 16
entry160:
pushq $0
pushq $160
jmp common
.align 16
entry161:
pushq $0
pushq $161
jmp common
.align 16
entry162:
pushq $0
pushq $162
jmp common
.align 16
entry163:
pushq $0
pushq $163
jmp common
.align 16
entry164:
pushq $0
pushq $164
jmp common
.align 16
entry165:
pushq $0
pushq $165
jmp common
.align 16
entry166:
pushq $0
pushq $166
jmp common
.align 16
entry167:
pushq $0
pushq $167
jmp common
.align 16
entry168:
pushq $0
pushq $168
jmp common
.align 16
entry169:
pushq $0
pushq $169
jmp common
.align 16
entry170:
pushq $0
pushq $170
jmp common
.align 16
entry171:
pushq $0
pushq $171
jmp common
.align 16
entry172:
pushq $0
pushq $172
jmp common
.align 16
entry173:
pushq $0
pushq $173
jmp common
.align 16
entry174:
pushq $0
pushq $174
jmp common
.align 16
entry175:
pushq $0
pushq $175
jmp common
.align 16
entry176:
pushq $0
pushq $176
jmp common
.align 16
entry177:
pushq $0
pushq $177
jmp common
.align 16
entry178:
pushq $0
pushq $178
jmp common
.align 16
entry179:
pushq $0
pushq $179
jmp common
.align 16
entry180:
pushq $0
pushq $180
jmp common
.align 16
entry181:
pushq $0
pushq $181
jmp common
.align 16
entry182:
pushq $0
pushq $182
jmp common
.align 16
entry183:
pushq $0
pushq $183
jmp common
.align 16
entry184:
pushq $0
pushq $184
jmp common
.align 16
entry185:
pushq $0
pushq $185
jmp common
.align 16
entry186:
pushq $0
pushq $186
jmp common
.align 16
entry187:
pushq $0
pushq $187
jmp common
.align 16
entry188:
pushq $0
pushq $188
jmp common
.align 16
entry189:
pushq $0
pushq $189
jmp common
.align 16
entry190:
pushq $0
pushq $190
jmp common
.align 16
entry191:
pushq $0
pushq $191
jmp common
.align 16
entry192:
pushq $0
pushq $192
jmp common
.align 16
entry193:
pushq $0
pushq $193
jmp common
.align 16
entry194:
pushq $0
pushq $194
jmp common
.align 16
entry195:
pushq $0
pushq $195
jmp common
.align 16
entry196:
pushq $0
pushq $196
jmp common
.align 16
entry197:
pushq $0
pushq $197
jmp common
.align 16
entry198:
pushq $0
pushq $198
jmp common
.align 16
entry199:
pushq $0
pushq $199
jmp common
.align 16
entry200:
pushq $0
pushq $200
jmp common
.align 16
entry201:
pushq $0
pushq $201
jmp common
.align 16
entry202:
pushq $0
pushq $202
jmp common
.align 16
entry203:
pushq $0
pushq $203
jmp common
.align 16
entry204:
pushq $0
pushq $204
jmp common
.align 16
entry205:
pushq $0
pushq $205
jmp common
.align 16
entry206:
pushq $0
pushq $206
jmp common
.align 16
entry207:
pushq $0
pushq $207
jmp common
.align 16
entry208:
pushq $0
pushq $208
jmp common
.align 16
entry209:
pushq $0
pushq $209
jmp common
.align 16
entry210:
pushq $0
pushq $210
jmp common
.align 16
entry211:
pushq $0
pushq $211
jmp common
.align 16
entry212:
pushq $0
pushq $212
jmp common
.align 16
entry213:
pushq $0
pushq $213
jmp common
.align 16
entry214:
pushq $0
pushq $214
jmp common
.align 16
entry215:
pushq $0
pushq $215
jmp common
.align 16
entry216:
pushq $0
pushq $216
jmp common
.align 16
entry217:
pushq $0
pushq $217
jmp common
.align 16
entry218:
pushq $0
pushq $218
jmp common
.align 16
entry219:
pushq $0
pushq $219
jmp common
.align 16
entry220:
pushq $0
pushq $220
jmp common
.align 16
entry221:
pushq $0
pushq $221
jmp common
.align 16
entry222:
pushq $0
pushq $222
jmp common
.align 16
entry223:
pushq $0
pushq $223
jmp common
.align 16
entry224:
pushq $0
pushq $224
jmp common
.align 16
entry225:
pushq $0
pushq $225
jmp common
.align 16
entry226:
pushq $0
pushq $226
jmp common
.align 16
entry227:
pushq $0
pushq $227
jmp common
.align 16
entry228:
pushq $0
pushq $228
jmp common
.align 16
entry229:
pushq $0
pushq $229
jmp common
.align 16
entry230:
pushq $0
pushq $230
jmp common
.align 16
entry231:
pushq $0
pushq $231
jmp common
.align 16
entry232:
pushq $0
pushq $232
jmp common
.align 16
entry233:
pushq $0
pushq $233
jmp common
.align 16
entry234:
pushq $0
pushq $234
jmp common
.align 16
entry235:
pushq $0
pushq $235
jmp common
.align 16
entry236:
pushq $0
pushq $236
jmp common
.align 16
entry237:
pushq $0
pushq $237
jmp common
.align 16
entry238:
pushq $0
pushq $238
jmp common
.align 16
entry239:
pushq $0
pushq $239
jmp common
.align 16
entry240:
pushq $0
pushq $240
jmp common
.align 16
entry241:
pushq $0
pushq $241
jmp common
.align 16
entry242:
pushq $0
pushq $242
jmp common
.align 16
entry243:
pushq $0
pushq $243
jmp common
.align 16
entry244:
pushq $0
pushq $244
jmp common
.align 16
entry245:
pushq $0
pushq $245
jmp common
.align 16
entry246:
pushq $0
pushq $246
jmp common
.align 16
entry247:
pushq $0
pushq $247
jmp common
.align 16
entry248:
pushq $0
pushq $248
jmp common
.align 16
entry249:
pushq $0
pushq $249
jmp common
.align 16
entry250:
pushq $0
pushq $250
jmp common
.align 16
entry251:
pushq $0
pushq $251
jmp common
.align 16
entry252:
pushq $0
pushq $252
jmp common
.align 16
entry253:
pushq $0
pushq $253
jmp common
.align 16
entry254:
pushq $0
pushq $254
jmp common
.align 16
entry255:
pushq $0
pushq $255
jmp common
.align 16
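/*
 * isr_stubs: a table holding the addresses of all 256 interrupt entry stubs
 * defined above. Each stub shown here pushes a placeholder error code (0)
 * and its vector number, then jumps to the shared `common` handler; the
 * table itself is presumably consumed elsewhere, e.g. when the IDT is built.
 */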
isr_stubs:
.quad entry0
.quad entry1
.quad entry2
.quad entry3
.quad entry4
.quad entry5
.quad entry6
.quad entry7
.quad entry8
.quad entry9
.quad entry10
.quad entry11
.quad entry12
.quad entry13
.quad entry14
.quad entry15
.quad entry16
.quad entry17
.quad entry18
.quad entry19
.quad entry20
.quad entry21
.quad entry22
.quad entry23
.quad entry24
.quad entry25
.quad entry26
.quad entry27
.quad entry28
.quad entry29
.quad entry30
.quad entry31
.quad entry32
.quad entry33
.quad entry34
.quad entry35
.quad entry36
.quad entry37
.quad entry38
.quad entry39
.quad entry40
.quad entry41
.quad entry42
.quad entry43
.quad entry44
.quad entry45
.quad entry46
.quad entry47
.quad entry48
.quad entry49
.quad entry50
.quad entry51
.quad entry52
.quad entry53
.quad entry54
.quad entry55
.quad entry56
.quad entry57
.quad entry58
.quad entry59
.quad entry60
.quad entry61
.quad entry62
.quad entry63
.quad entry64
.quad entry65
.quad entry66
.quad entry67
.quad entry68
.quad entry69
.quad entry70
.quad entry71
.quad entry72
.quad entry73
.quad entry74
.quad entry75
.quad entry76
.quad entry77
.quad entry78
.quad entry79
.quad entry80
.quad entry81
.quad entry82
.quad entry83
.quad entry84
.quad entry85
.quad entry86
.quad entry87
.quad entry88
.quad entry89
.quad entry90
.quad entry91
.quad entry92
.quad entry93
.quad entry94
.quad entry95
.quad entry96
.quad entry97
.quad entry98
.quad entry99
.quad entry100
.quad entry101
.quad entry102
.quad entry103
.quad entry104
.quad entry105
.quad entry106
.quad entry107
.quad entry108
.quad entry109
.quad entry110
.quad entry111
.quad entry112
.quad entry113
.quad entry114
.quad entry115
.quad entry116
.quad entry117
.quad entry118
.quad entry119
.quad entry120
.quad entry121
.quad entry122
.quad entry123
.quad entry124
.quad entry125
.quad entry126
.quad entry127
.quad entry128
.quad entry129
.quad entry130
.quad entry131
.quad entry132
.quad entry133
.quad entry134
.quad entry135
.quad entry136
.quad entry137
.quad entry138
.quad entry139
.quad entry140
.quad entry141
.quad entry142
.quad entry143
.quad entry144
.quad entry145
.quad entry146
.quad entry147
.quad entry148
.quad entry149
.quad entry150
.quad entry151
.quad entry152
.quad entry153
.quad entry154
.quad entry155
.quad entry156
.quad entry157
.quad entry158
.quad entry159
.quad entry160
.quad entry161
.quad entry162
.quad entry163
.quad entry164
.quad entry165
.quad entry166
.quad entry167
.quad entry168
.quad entry169
.quad entry170
.quad entry171
.quad entry172
.quad entry173
.quad entry174
.quad entry175
.quad entry176
.quad entry177
.quad entry178
.quad entry179
.quad entry180
.quad entry181
.quad entry182
.quad entry183
.quad entry184
.quad entry185
.quad entry186
.quad entry187
.quad entry188
.quad entry189
.quad entry190
.quad entry191
.quad entry192
.quad entry193
.quad entry194
.quad entry195
.quad entry196
.quad entry197
.quad entry198
.quad entry199
.quad entry200
.quad entry201
.quad entry202
.quad entry203
.quad entry204
.quad entry205
.quad entry206
.quad entry207
.quad entry208
.quad entry209
.quad entry210
.quad entry211
.quad entry212
.quad entry213
.quad entry214
.quad entry215
.quad entry216
.quad entry217
.quad entry218
.quad entry219
.quad entry220
.quad entry221
.quad entry222
.quad entry223
.quad entry224
.quad entry225
.quad entry226
.quad entry227
.quad entry228
.quad entry229
.quad entry230
.quad entry231
.quad entry232
.quad entry233
.quad entry234
.quad entry235
.quad entry236
.quad entry237
.quad entry238
.quad entry239
.quad entry240
.quad entry241
.quad entry242
.quad entry243
.quad entry244
.quad entry245
.quad entry246
.quad entry247
.quad entry248
.quad entry249
.quad entry250
.quad entry251
.quad entry252
.quad entry253
.quad entry254
.quad entry255
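/*
 * A minimal sketch (not part of this file) of how a stub table like isr_stubs
 * is commonly consumed when populating a 64-bit IDT. The gate layout below is
 * the standard x86-64 interrupt-gate format; the names idt and idt_set and
 * the code-segment selector 0x08 are illustrative assumptions only:
 *
 *   struct idt_entry {
 *       uint16_t off_lo;
 *       uint16_t selector;
 *       uint8_t  ist;
 *       uint8_t  type_attr;   // 0x8E = present, DPL 0, 64-bit interrupt gate
 *       uint16_t off_mid;
 *       uint32_t off_hi;
 *       uint32_t reserved;
 *   } __attribute__((packed));
 *
 *   extern uint64_t isr_stubs[256];
 *   static struct idt_entry idt[256];
 *
 *   static void idt_set(int vec, uint64_t handler) {
 *       idt[vec].off_lo    = handler & 0xFFFFu;
 *       idt[vec].selector  = 0x08;            // assumed kernel code segment
 *       idt[vec].ist       = 0;
 *       idt[vec].type_attr = 0x8E;
 *       idt[vec].off_mid   = (handler >> 16) & 0xFFFFu;
 *       idt[vec].off_hi    = (uint32_t)(handler >> 32);
 *       idt[vec].reserved  = 0;
 *   }
 *
 *   // e.g. during early boot: for (int v = 0; v < 256; v++) idt_set(v, isr_stubs[v]);
 */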
|
0Leeeezy0/20th_smart_vision | 7,618 | libraries/sdk/utilities/fsl_memcpy.S |
/*
* Copyright 2022 NXP
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
.syntax unified
.text
.thumb
.align 2
#ifndef MSDK_MISC_OVERRIDE_MEMCPY
#define MSDK_MISC_OVERRIDE_MEMCPY 1
#endif
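/* When MSDK_MISC_OVERRIDE_MEMCPY is non-zero (the default set above), the
 optimized memcpy below is built and replaces the toolchain's version;
 a project can define it to 0 to keep the standard library implementation. */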
/*
 This memcpy function replaces the GCC newlib implementation, for two reasons:
 1. The newlib-nano memcpy copies byte by byte, which is slow.
 2. The newlib memcpy for CM4, CM7 and CM33 doesn't check address alignment,
 so it may fault when the address is unaligned and the memory region
 is device memory, which does not support unaligned access.
 This function is manually optimized based on the assembly output of the C function below.
 The workflow is:
 1. Return directly if the length is 0.
 2. If the source address is not 4-byte aligned, copy the unaligned leading bytes one by one.
 3. If the destination address is 4-byte aligned, copy 16 bytes per loop iteration first,
 then copy the remaining 8-byte, 4-byte, 2-byte and 1-byte tails.
 4. If the destination address is not 4-byte aligned, load the source data word by word,
 then store each word to memory according to the destination's alignment. Copy any
 remaining bytes one by one.
 The source code of the C function is:
#define __CPY_WORD(dst, src) \
*(uint32_t *)(dst) = *(uint32_t *)(src); \
(dst) = ((uint32_t *)dst) + 1; \
(src) = ((uint32_t *)src) + 1
#define __CPY_HWORD(dst, src) \
*(uint16_t *)(dst) = *(uint16_t *)(src); \
(dst) = ((uint16_t *)dst) + 1; \
(src) = ((uint16_t *)src) + 1
#define __CPY_BYTE(dst, src) \
*(uint8_t *)(dst) = *(uint8_t *)(src); \
(dst) = ((uint8_t *)dst) + 1; \
(src) = ((uint8_t *)src) + 1
void * memcpy(void *restrict dst, const void * restrict src, size_t n)
{
void *ret = dst;
uint32_t tmp;
if (0 == n) return ret;
while (((uintptr_t)src & 0x03UL) != 0UL)
{
__CPY_BYTE(dst, src);
n--;
if (0 == n) return ret;
}
if (((uintptr_t)dst & 0x03UL) == 0UL)
{
while (n >= 16UL)
{
__CPY_WORD(dst, src);
__CPY_WORD(dst, src);
__CPY_WORD(dst, src);
__CPY_WORD(dst, src);
n-= 16UL;
}
if ((n & 0x08UL) != 0UL)
{
__CPY_WORD(dst, src);
__CPY_WORD(dst, src);
}
if ((n & 0x04UL) != 0UL)
{
__CPY_WORD(dst, src);
}
if ((n & 0x02UL) != 0UL)
{
__CPY_HWORD(dst, src);
}
if ((n & 0x01UL) != 0UL)
{
__CPY_BYTE(dst, src);
}
}
else
{
if (((uintptr_t)dst & 1UL) == 0UL)
{
while (n >= 4)
{
tmp = *(uint32_t *)src;
src = ((uint32_t *)src) + 1;
*(volatile uint16_t *)dst = (uint16_t)tmp;
dst = ((uint16_t *)dst) + 1;
*(volatile uint16_t *)dst = (uint16_t)(tmp>>16U);
dst = ((uint16_t *)dst) + 1;
n-=4;
}
}
else
{
while (n >= 4)
{
tmp = *(uint32_t *)src;
src = ((uint32_t *)src) + 1;
*(volatile uint8_t *)dst = (uint8_t)tmp;
dst = ((uint8_t *)dst) + 1;
*(volatile uint16_t *)dst = (uint16_t)(tmp>>8U);
dst = ((uint16_t *)dst) + 1;
*(volatile uint8_t *)dst = (uint8_t)(tmp>>24U);
dst = ((uint8_t *)dst) + 1;
n-=4;
}
}
while (n > 0)
{
__CPY_BYTE(dst, src);
n--;
}
}
return ret;
}
The test function is:
void test_memcpy(uint8_t *dst, const uint8_t * src, size_t n)
{
uint8_t * ds;
uint8_t * de;
const uint8_t *ss;
const uint8_t *se;
uint8_t * ret;
for (ss = src; ss < src+n; ss++)
{
for (se = ss; se < src + n; se ++)
{
size_t nn = (uintptr_t)se - (uintptr_t)ss;
for (ds = dst; ds + nn < dst+n; ds++)
{
de = ds + nn;
memset(dst, 0, n);
ret = memcpy(ds, ss, nn);
assert(ret == ds);
for (const uint8_t *data = dst; data < ds; data++)
{
assert(0 == *data);
}
for (const uint8_t *data = de; data < dst+n; data++)
{
assert(0 == *data);
}
assert(memcmp(ds, ss, nn) == 0);
}
}
}
}
test_memcpy((uint8_t *)0x20240000, (const uint8_t *)0x202C0000, 48);
*/
#if MSDK_MISC_OVERRIDE_MEMCPY
.thumb_func
.align 2
.global memcpy
.type memcpy, %function
memcpy:
push {r0, r4, r5, r6, r7, lr}
cmp r2, #0
beq ret /* If copy size is 0, return. */
src_word_unaligned:
ands r3, r1, #3 /* Check whether src is 4-byte aligned. */
beq.n src_word_aligned /* src is 4-byte aligned, jump. */
ldrb r4, [r1], #1
subs r2, r2, #1 /* n-- */
strb r4, [r0], #1
beq.n ret /* n=0, return. */
b.n src_word_unaligned
src_word_aligned:
ands r3, r0, #3 /* Check dest 4-byte align. */
bne.n dst_word_unaligned
dst_word_aligned:
cmp r2, #16
blt.n size_ge_8
size_ge_16: /* size >= 16: copy 16 bytes per iteration using ldm and stm. */
subs r2, r2, #16 /* n -= 16 */
ldmia r1!, { r4, r5, r6, r7 }
cmp r2, #16
stmia r0!, { r4, r5, r6, r7 }
bcs.n size_ge_16
size_ge_8: /* size >= 8 */
lsls r3, r2, #28 /* Shift bit 3 of n into the N flag. */
itt mi
ldmiami r1!, { r4, r5 } /* If (n & 8) != 0, copy 8 bytes. */
stmiami r0!, { r4, r5 }
size_ge_4: /* size >= 4 */
lsls r3, r2, #29 /* Shift bit 2 of n into the N flag. */
itt mi
ldrmi r4, [r1], #4 /* If (n & 4) != 0, copy 4 bytes. */
strmi r4, [r0], #4
size_ge_2: /* size >= 2 */
lsls r3, r2, #30 /* Shift bit 1 of n into the N flag. */
itt mi
ldrhmi r4, [r1], #2 /* If (n & 2) != 0, copy 2 bytes. */
strhmi r4, [r0], #2
size_ge_1: /* size >= 1 */
lsls r3, r2, #31 /* Shift bit 0 of n into the N flag. */
itt mi
ldrbmi r4, [r1] /* If (n & 1) != 0, copy the last byte. */
strbmi r4, [r0]
b.n ret
dst_word_unaligned:
lsls r3, r0, #31 /* Shift bit 0 of dst into the N flag. */
bmi.n dst_half_word_unaligned /* dst is not 2-byte aligned. */
dst_half_word_aligned: /* dst is 2-byte but not 4-byte aligned. */
cmp r2, #4
bcc.n size_lt_4
ldr r4, [r1], #4 /* Load one word from the aligned src. */
subs r2, r2, #4
strh r4, [r0], #2 /* Store it as two halfwords. */
lsrs r5, r4, #16
strh r5, [r0], #2
b dst_half_word_aligned
dst_half_word_unaligned: /* dst is not even 2-byte aligned. */
cmp r2, #4
bcc.n size_lt_4
ldr r4, [r1], #4 /* Load one word from the aligned src. */
subs r2, r2, #4
strb r4, [r0], #1 /* Store it as byte + halfword + byte. */
lsrs r5, r4, #8
strh r5, [r0], #2
lsrs r6, r4, #24
strb r6, [r0], #1
b dst_half_word_unaligned
size_lt_4: /* size less than 4. */
cmp r2, #0
ittt ne
ldrbne r4, [r1], #1
strbne r4, [r0], #1
subne r2, r2, #1
bne size_lt_4
ret:
pop {r0, r4, r5, r6, r7, pc}
#endif /* MSDK_MISC_OVERRIDE_MEMCPY */
|
This dataset is a processed derivative of nick007x/github-code-2025.
The original data was aggregated by nick007x from public GitHub repositories. We have retained the original content, file paths, and metadata while restructuring the format for easier consumption by language-specific models.
To create this dataset, we performed the following processing on the source data: file extensions (e.g. .py, .rs, .ts) were mapped to their respective programming languages using a comprehensive extension map. The data contained in this dataset belongs to the original authors of the code repositories on GitHub.
The original data comes from nick007x/github-code-2025. If you use this dataset, please cite the original source:
@misc{github-code-2025,
author = {nick007x},
title = {GitHub Code 2025 Dataset},
year = {2025},
publisher = {Hugging Face},
howpublished = {\url{https://huggingface.co/datasets/nick007x/github-code-2025}}
}