improved mmu and gic code

This commit is contained in:
Wolfvak 2020-07-19 11:59:52 -03:00
parent 5905fb84fb
commit 6487307cf0
7 changed files with 353 additions and 308 deletions

View File

@ -16,7 +16,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <types.h>
#include <common.h>
#include <arm.h>
#include "arm/gic.h"
@ -26,7 +26,7 @@
#define REG_GIC_CONTROL(c) (*REG_GIC(c, 0x00, u32))
#define REG_GIC_PRIOMASK(c) (*REG_GIC(c, 0x04, u32))
#define REG_GIC_POI(c) (*REG_GIC(c, 0x08, u32))
#define REG_GIC_POI(c) (*REG_GIC(c, 0x08, u32))
#define REG_GIC_IRQACK(c) (*REG_GIC(c, 0x0C, u32))
#define REG_GIC_IRQEND(c) (*REG_GIC(c, 0x10, u32))
#define REG_GIC_LASTPRIO(c) (*REG_GIC(c, 0x14, u32))
@ -38,185 +38,201 @@
/* Interrupt Distributor Registers */
#define REG_DIC(off, type) REG_ARM_PMR(0x1000 + (off), type)
#define REG_DIC_CONTROL (*REG_DIC(0x00, u32))
#define REG_DIC_TYPE (*REG_DIC(0x04, u32))
#define REG_DIC_SETENABLE REG_DIC(0x100, u32)
#define REG_DIC_CONTROL (*REG_DIC(0x00, u32))
#define REG_DIC_TYPE (*REG_DIC(0x04, u32))
#define REG_DIC_SETENABLE REG_DIC(0x100, u32) // 32 intcfg per reg
#define REG_DIC_CLRENABLE REG_DIC(0x180, u32)
#define REG_DIC_SETPENDING REG_DIC(0x200, u32)
#define REG_DIC_CLRPENDING REG_DIC(0x280, u32)
#define REG_DIC_PRIORITY REG_DIC(0x400, u8)
#define REG_DIC_TARGETPROC REG_DIC(0x800, u8)
#define REG_DIC_CFGREG REG_DIC(0xC00, u32)
#define REG_DIC_SOFTINT (*REG_DIC(0xF00, u32))
#define REG_DIC_PRIORITY REG_DIC(0x400, u8) // 1 intcfg per reg (in upper 4 bits)
#define REG_DIC_TARGETCPU REG_DIC(0x800, u8) // 1 intcfg per reg
#define REG_DIC_CFGREG REG_DIC(0xC00, u32) // 16 intcfg per reg
#define REG_DIC_SOFTINT (*REG_DIC(0xF00, u32))
// used only in reset routines
#define REG_DIC_PRIORITY32 REG_DIC(0x400, u32) // 4 intcfg per reg (in upper 4 bits)
#define REG_DIC_TARGETCPU32 REG_DIC(0x800, u32) // 4 intcfg per reg
#define GIC_PRIO_NEVER32 \
(GIC_PRIO_NEVER | (GIC_PRIO_NEVER << 8) | \
(GIC_PRIO_NEVER << 16) | (GIC_PRIO_NEVER << 24))
#define GIC_PRIO_HIGH32 \
(GIC_PRIO_HIGHEST | (GIC_PRIO_HIGHEST << 8) | \
(GIC_PRIO_HIGHEST << 16) | (GIC_PRIO_HIGHEST << 24))
/* CPU source ID is present in Interrupt Acknowledge register? */
#define IRQN_SRC_MASK (0x7 << 10)
/* Interrupt Handling */
#define LOCAL_IRQS (32)
#define DIC_MAX_IRQ (LOCAL_IRQS + MAX_IRQ)
#define IRQN_IS_LOCAL(n) ((n) < LOCAL_IRQS)
#define COREMASK_VALID(x) (((x) > 0) && ((x) < BIT(MAX_CPU)))
#define IRQN_IS_VALID(n) ((n) < DIC_MAX_IRQ)
#define LOCAL_IRQ_OFF(c, n) (((c) * LOCAL_IRQS) + (n))
#define GLOBAL_IRQ_OFF(n) (((MAX_CPU-1) * LOCAL_IRQS) + (n))
#define IRQ_TABLE_OFF(c, n) \
(IRQN_IS_LOCAL(n) ? LOCAL_IRQ_OFF((c), (n)) : GLOBAL_IRQ_OFF(n))
static gicIrqHandler gicIrqHandlers[DIC_MAX_IRQ];
static IRQ_Handler IRQ_Handlers[IRQ_TABLE_OFF(0, MAX_IRQ)];
static struct {
u8 tgt;
u8 prio;
u8 mode;
} gicIrqConfig[DIC_MAX_IRQ];
static IRQ_Handler GIC_GetCB(u32 irqn)
{
irqn &= ~(15 << 10); // clear source CPU bits
if (IRQN_IS_VALID(irqn)) {
return IRQ_Handlers[IRQ_TABLE_OFF(ARM_CoreID(), irqn)];
} else {
// Possibly have a dummy handler function that
// somehow notifies of an unhandled interrupt?
return NULL;
}
}
// gets used whenever a NULL pointer is passed to gicEnableInterrupt
static void gicDummyHandler(u32 irqn) { (void)irqn; return; }
static void GIC_SetCB(u32 irqn, u32 cpu, IRQ_Handler handler)
{
if (IRQN_IS_VALID(irqn)) {
IRQ_Handlers[IRQ_TABLE_OFF(cpu, irqn)] = handler;
}
}
static void GIC_ClearCB(u32 irqn, u32 cpu)
{
GIC_SetCB(irqn, cpu, NULL);
}
void GIC_MainHandler(void)
void gicTopHandler(void)
{
while(1) {
IRQ_Handler handler;
u32 irqn = REG_GIC_IRQACK(GIC_THIS_CPU_ALIAS);
if (irqn == GIC_IRQ_SPURIOUS)
u32 irqn;
/**
If more than one of these CPUs reads the Interrupt Acknowledge Register at the
same time, they can all acknowledge the same interrupt. The interrupt service
routine must ensure that only one of them tries to process the interrupt, with the
others returning after writing the ID to the End of Interrupt Register.
*/
irqn = REG_GIC_IRQACK(GIC_THIS_CPU_ALIAS);
if (irqn == GIC_IRQ_SPURIOUS) // no further processing is needed
break;
handler = GIC_GetCB(irqn);
if (handler != NULL)
handler(irqn);
(gicIrqHandlers[irqn & ~IRQN_SRC_MASK])(irqn);
// if the id is < 16, the source CPU can be obtained from irqn
// if the handler isn't set, it'll try to branch to 0 and trigger a prefetch abort
REG_GIC_IRQEND(GIC_THIS_CPU_ALIAS) = irqn;
}
}
void GIC_GlobalReset(void)
void gicGlobalReset(void)
{
int gicn, intn;
u32 dic_type;
unsigned gicn, intn;
// Number of local controllers
gicn = ((REG_DIC_TYPE >> 5) & 3) + 1;
dic_type = REG_DIC_TYPE;
// Number of interrupt lines (up to 224 external + 32 fixed internal)
intn = ((REG_DIC_TYPE & 7) + 1) << 5;
// number of local controllers
gicn = ((dic_type >> 5) & 3) + 1;
// number of interrupt lines (up to 224 external + 32 fixed internal per CPU)
intn = ((dic_type & 7) + 1) * 32;
// clamp it down to the number of CPUs this code is designed to handle
if (gicn > MAX_CPU)
gicn = MAX_CPU;
// Clear the interrupt table
for (unsigned int i = 0; i < sizeof(IRQ_Handlers)/sizeof(*IRQ_Handlers); i++)
IRQ_Handlers[i] = NULL;
// clear the interrupt handler and config table
memset(gicIrqHandlers, 0, sizeof(gicIrqHandlers));
memset(gicIrqConfig, 0, sizeof(gicIrqConfig));
// Disable all MP11 GICs
for (int i = 0; i < gicn; i++)
// disable all MP11 GICs
for (unsigned i = 0; i < gicn; i++)
REG_GIC_CONTROL(i) = 0;
// Disable the main DIC
// disable the main DIC
REG_DIC_CONTROL = 0;
// Clear all DIC interrupts
for (int i = 1; i < (intn / 32); i++) {
// clear all external interrupts
for (unsigned i = 1; i < (intn / 32); i++) {
REG_DIC_CLRENABLE[i] = ~0;
REG_DIC_CLRPENDING[i] = ~0;
}
// Reset all DIC priorities to lowest and clear target processor regs
for (int i = 32; i < intn; i++) {
REG_DIC_PRIORITY[i] = 0;
REG_DIC_TARGETPROC[i] = 0;
// reset all external priorities to highest by default
// clear target processor regs
for (unsigned i = 4; i < (intn / 4); i++) {
REG_DIC_PRIORITY32[i] = GIC_PRIO_HIGH32;
REG_DIC_TARGETCPU32[i] = 0;
}
// Set all interrupts to rising edge triggered and 1-N model
for (int i = 2; i < (intn / 16); i++)
REG_DIC_CFGREG[i] = ~0;
// set all interrupts to active level triggered in N-N model
for (unsigned i = 16; i < (intn / 16); i++)
REG_DIC_CFGREG[i] = 0;
// Enable the main DIC
// re-enable the main DIC
REG_DIC_CONTROL = 1;
for (int i = 0; i < gicn; i++) {
// Compare all priority bits
for (unsigned i = 0; i < gicn; i++) {
// compare all priority bits
REG_GIC_POI(i) = 3;
// Don't mask any interrupt with low priority
// don't mask any interrupt with low priority
REG_GIC_PRIOMASK(i) = 0xF0;
// Enable the MP11 GIC
// enable all the MP11 GICs
REG_GIC_CONTROL(i) = 1;
}
}
void GIC_LocalReset(void)
void gicLocalReset(void)
{
u32 irq_s;
// Clear out local interrupt configuration bits
// disable all local interrupts
REG_DIC_CLRENABLE[0] = ~0;
REG_DIC_CLRPENDING[0] = ~0;
for (int i = 0; i < 32; i++) {
REG_DIC_PRIORITY[i] = 0;
REG_DIC_TARGETPROC[i] = 0;
for (unsigned i = 0; i < 4; i++) {
REG_DIC_PRIORITY32[i] = GIC_PRIO_HIGH32;
// local IRQs are always unmasked by default
// REG_DIC_TARGETCPU[i] = 0;
// not needed, always read as corresponding MP11 core
}
for (int i = 0; i < 2; i++)
REG_DIC_CFGREG[i] = ~0;
// Acknowledge until it gets a spurious IRQ
// ack until it gets a spurious IRQ
do {
irq_s = REG_GIC_PENDING(GIC_THIS_CPU_ALIAS);
REG_GIC_IRQEND(GIC_THIS_CPU_ALIAS) = irq_s;
} while(irq_s != GIC_IRQ_SPURIOUS);
}
int GIC_Enable(u32 irqn, u32 coremask, u32 prio, IRQ_Handler handler)
{
if (!IRQN_IS_VALID(irqn))
return -1;
static void gicSetIrqCfg(u32 irqn, u32 mode) {
u32 smt, cfg;
// in theory this should be replaced by a faster CLZ lookup
// in practice, meh, MAX_CPU will only be 2 anyway...
for (int i = 0; i < MAX_CPU; i++) {
if (coremask & BIT(i))
GIC_SetCB(irqn, i, handler);
}
REG_DIC_CLRPENDING[irqn >> 5] |= BIT(irqn & 0x1F);
REG_DIC_SETENABLE[irqn >> 5] |= BIT(irqn & 0x1F);
REG_DIC_PRIORITY[irqn] = prio << 4;
REG_DIC_TARGETPROC[irqn] = coremask;
return 0;
smt = irqn & 15;
cfg = REG_DIC_CFGREG[irqn / 16];
cfg &= ~(3 << smt);
cfg |= mode << smt;
REG_DIC_CFGREG[irqn / 16] = cfg;
}
int GIC_Disable(u32 irqn, u32 coremask)
void gicSetInterruptConfig(u32 irqn, u32 coremask, u32 prio, u32 mode, gicIrqHandler handler)
{
if (irqn >= MAX_IRQ)
return -1;
if (handler == NULL) // maybe add runtime ptr checks here too?
handler = gicDummyHandler;
for (int i = 0; i < MAX_CPU; i++) {
if (coremask & BIT(i))
GIC_ClearCB(irqn, i);
}
REG_DIC_CLRPENDING[irqn >> 5] |= BIT(irqn & 0x1F);
REG_DIC_CLRENABLE[irqn >> 5] |= BIT(irqn & 0x1F);
REG_DIC_TARGETPROC[irqn] = 0;
return 0;
gicIrqConfig[irqn].tgt = coremask;
gicIrqConfig[irqn].prio = prio;
gicIrqConfig[irqn].mode = mode;
gicIrqHandlers[irqn] = handler;
}
void GIC_TriggerSoftIRQ(u32 irqn, u32 mode, u32 coremask)
void gicClearInterruptConfig(u32 irqn)
{
REG_DIC_SOFTINT = (mode << 24) | (coremask << 16) | irqn;
memset(&gicIrqConfig[irqn], 0, sizeof(gicIrqConfig[irqn]));
gicIrqHandlers[irqn] = NULL;
}
void gicEnableInterrupt(u32 irqn)
{
REG_DIC_PRIORITY[irqn] = gicIrqConfig[irqn].prio;
REG_DIC_TARGETCPU[irqn] = gicIrqConfig[irqn].tgt;
gicSetIrqCfg(irqn, gicIrqConfig[irqn].mode);
REG_DIC_CLRPENDING[irqn / 32] |= BIT(irqn & 0x1F);
REG_DIC_SETENABLE[irqn / 32] |= BIT(irqn & 0x1F);
}
void gicDisableInterrupt(u32 irqn)
{
REG_DIC_CLRENABLE[irqn / 32] |= BIT(irqn & 0x1F);
REG_DIC_CLRPENDING[irqn / 32] |= BIT(irqn & 0x1F);
}
void gicTriggerSoftInterrupt(u32 softirq)
{
REG_DIC_SOFTINT = softirq;
}

View File

@ -1,6 +1,6 @@
/*
* This file is part of GodMode9
* Copyright (C) 2017-2019 Wolfvak
* Copyright (C) 2017-2020 Wolfvak
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -17,28 +17,96 @@
*/
#pragma once
#include <types.h>
typedef void (*IRQ_Handler)(u32 irqn);
#include <common.h>
#include <arm.h>
typedef void (*gicIrqHandler)(u32 irqn);
enum {
GIC_LEVELHIGH_NN = 0, // no interrupts use level high triggers so far
GIC_LEVELHIGH_1N = 1,
GIC_RISINGEDGE_NN = 2,
GIC_RISINGEDGE_1N = 3
// With the 1-N model, an interrupt that is taken on any CPU clears the Pending
// status on all CPUs.
// With the N-N model, all CPUs receive the interrupt independently. The Pending
// status is cleared only for the CPU that takes it, not for the other CPUs
};
enum {
GIC_PRIO0 = 0x00,
GIC_PRIO1 = 0x10,
GIC_PRIO2 = 0x20,
GIC_PRIO3 = 0x30,
GIC_PRIO4 = 0x40,
GIC_PRIO5 = 0x50,
GIC_PRIO6 = 0x60,
GIC_PRIO7 = 0x70,
GIC_PRIO14 = 0xE0,
GIC_PRIO15 = 0xF0,
};
#define GIC_PRIO_HIGHEST GIC_PRIO0
#define GIC_PRIO_LOWEST GIC_PRIO14
#define GIC_PRIO_NEVER GIC_PRIO15
void gicGlobalReset(void);
void gicLocalReset(void);
/*
Notes from https://static.docs.arm.com/ddi0360/f/DDI0360F_arm11_mpcore_r2p0_trm.pdf
INTERRUPT ENABLE:
Fields for interrupts 0-15 are read as one, that is, always enabled, and writes to
these fields have no effect.
Fields for interrupts that are not present (as determined by the Interrupt Controller
Type Register and the interrupt number field) are read as zero and writes to these
fields have no effect.
INTERRUPT PRIORITY:
The first four registers are aliased for each MP11 CPU, that is, the priority set for
ID0-15 and ID29-31 can be different for each MP11 CPU. The priority of IPIs ID0-15
depends on the receiving CPU, not the sending CPU.
INTERRUPT CPU TARGET:
For MP11 CPU n, CPU targets 29, 30 and 31 are read as (1 << n). Writes are ignored.
For IT0-IT28, these fields are read as zero and writes are ignored.
INTERRUPT CONFIGURATION:
For ID0-ID15, bit 1 of the configuration pair is always read as one, that is, rising edge
sensitive.
For ID0-ID15, bit 0 (software model) can be configured and applies to the interrupts
sent from the writing MP11 CPU.
For ID29, and ID30, the configuration pair is always read as b10, that is rising edge
sensitive and N-N software model because these IDs are allocated to timer and
watchdog interrupts that are CPU-specific
*/
#define COREMASK_ALL (BIT(MAX_CPU) - 1)
void gicSetInterruptConfig(u32 irqn, u32 coremask, u32 prio, u32 mode, gicIrqHandler handler);
void gicClearInterruptConfig(u32 irqn);
void gicEnableInterrupt(u32 irqn);
void gicDisableInterrupt(u32 irqn);
enum {
GIC_SOFTIRQ_LIST = 0,
GIC_SOFTIRQ_OTHERS = 1, // all except self
GIC_SOFTIRQ_SELF = 2,
};
#define GIC_SOFTIRQ_SOURCE(n) (((n) >> 10) & 0xF)
#define GIC_SOFTIRQ_NUMBER(n) ((n) & 0x3FF)
#define GIC_SOFTIRQ_ID(n) ((n) & 0x3FF)
enum {
GIC_SOFTIRQ_NORMAL = 0,
GIC_SOFTIRQ_NOTSELF = 1,
GIC_SOFTIRQ_SELF = 2
};
#define GIC_SOFTIRQ_FMT(id, filter, coremask) \
((id) | ((coremask) << 16) | ((filter) << 24))
// id & 0xf, coremask & 3, filter & 3
// coremask is only used with filter == GIC_SOFTIRQ_LIST
enum {
GIC_HIGHEST_PRIO = 0x0,
GIC_LOWEST_PRIO = 0xE,
};
#define GIC_SOFTIRQ_SRC(x) (((x) >> 10) % MAX_CPU)
void GIC_GlobalReset(void);
void GIC_LocalReset(void);
int GIC_Enable(u32 irqn, u32 coremask, u32 prio, IRQ_Handler handler);
int GIC_Disable(u32 irqn, u32 coremask);
void GIC_TriggerSoftIRQ(u32 irqn, u32 mode, u32 coremask);
void gicTriggerSoftInterrupt(u32 softirq);

View File

@ -54,7 +54,7 @@
#define DESCRIPTOR_TYPE_MASK (3)
enum DescriptorType {
enum {
L1_UNMAPPED,
L1_COARSE,
L1_SECTION,
@ -67,76 +67,61 @@ enum DescriptorType {
typedef struct {
u32 desc[4096];
} __attribute__((aligned(16384))) MMU_Lvl1_Table;
} __attribute__((aligned(16384))) mmuLevel1Table;
typedef struct {
u32 desc[256];
} __attribute__((aligned(1024))) MMU_Lvl2_Table;
} __attribute__((aligned(1024))) mmuLevel2Table;
static MMU_Lvl1_Table MMU_Lvl1_TT;
static mmuLevel1Table mmuGlobalTT;
/* function to allocate 2nd level page tables */
// simple watermark allocator for 2nd level page tables
#define MAX_SECOND_LEVEL (4)
static MMU_Lvl2_Table Lvl2_Tables[MAX_SECOND_LEVEL];
static u32 Lvl2_Allocated = 0;
static MMU_Lvl2_Table *Alloc_Lvl2(void)
static mmuLevel2Table mmuCoarseTables[MAX_SECOND_LEVEL];
static u32 mmuCoarseAllocated = 0;
static mmuLevel2Table *mmuAllocateLevel2Table(void)
{
if (Lvl2_Allocated == MAX_SECOND_LEVEL)
return NULL;
return &Lvl2_Tables[Lvl2_Allocated++];
return &mmuCoarseTables[mmuCoarseAllocated++];
}
/* functions to convert from internal page flag format to ARM */
// functions to convert from internal page flag format to ARM
/* {TEX, CB} */
static const u8 MMU_TypeLUT[MEMORY_TYPES][2] = {
[STRONGLY_ORDERED] = {0, 0},
[NON_CACHEABLE] = {1, 0},
[DEVICE_SHARED] = {0, 1},
[DEVICE_NONSHARED] = {2, 0},
[CACHED_WT] = {0, 2},
[CACHED_WB] = {1, 3},
[CACHED_WB_ALLOC] = {1, 3},
// {TEX, CB} pairs
static const u8 mmuTypeLUT[MMU_MEMORY_TYPES][2] = {
[MMU_STRONG_ORDER] = {0, 0},
[MMU_UNCACHEABLE] = {1, 0},
[MMU_DEV_SHARED] = {0, 1},
[MMU_DEV_NONSHARED] = {2, 0},
[MMU_CACHE_WT] = {0, 2},
[MMU_CACHE_WB] = {1, 3},
[MMU_CACHE_WBA] = {1, 3},
};
static u32 MMU_GetTEX(u32 f)
{
return MMU_TypeLUT[MMU_FLAGS_TYPE(f)][0];
}
static u32 mmuGetTEX(u32 f)
{ return mmuTypeLUT[MMU_FLAGS_TYPE(f)][0]; }
static u32 mmuGetCB(u32 f)
{ return mmuTypeLUT[MMU_FLAGS_TYPE(f)][1]; }
static u32 mmuGetNX(u32 f)
{ return MMU_FLAGS_NOEXEC(f) ? 1 : 0; }
static u32 mmuGetShared(u32 f)
{ return MMU_FLAGS_SHARED(f) ? 1 : 0; }
static u32 MMU_GetCB(u32 f)
{
return MMU_TypeLUT[MMU_FLAGS_TYPE(f)][1];
}
// access permissions
static const u8 mmuAccessLUT[MMU_ACCESS_TYPES] = {
[MMU_NO_ACCESS] = 0,
[MMU_READ_ONLY] = 0x21,
[MMU_READ_WRITE] = 0x01,
};
static u32 MMU_GetAP(u32 f)
{
switch(MMU_FLAGS_ACCESS(f)) {
default:
case NO_ACCESS:
return 0;
case READ_ONLY:
return 0x21;
case READ_WRITE:
return 0x01;
}
}
static u32 mmuGetAP(u32 f)
{ return mmuAccessLUT[MMU_FLAGS_ACCESS(f)]; }
static u32 MMU_GetNX(u32 f)
// other misc helper functions
static unsigned mmuWalkTT(u32 va)
{
return MMU_FLAGS_NOEXEC(f) ? 1 : 0;
}
static u32 MMU_GetShared(u32 f)
{
return MMU_FLAGS_SHARED(f) ? 1 : 0;
}
static enum DescriptorType MMU_WalkTT(u32 va)
{
MMU_Lvl2_Table *coarsepd;
u32 desc = MMU_Lvl1_TT.desc[L1_VA_IDX(va)];
mmuLevel2Table *coarsepd;
u32 desc = mmuGlobalTT.desc[L1_VA_IDX(va)];
switch(desc & DESCRIPTOR_TYPE_MASK) {
case DESCRIPTOR_L1_UNMAPPED:
@ -152,7 +137,7 @@ static enum DescriptorType MMU_WalkTT(u32 va)
return L1_RESERVED;
}
coarsepd = (MMU_Lvl2_Table*)(desc & COARSE_MASK);
coarsepd = (mmuLevel2Table*)(desc & COARSE_MASK);
desc = coarsepd->desc[L2_VA_IDX(va)];
switch(desc & DESCRIPTOR_TYPE_MASK) {
@ -169,21 +154,20 @@ static enum DescriptorType MMU_WalkTT(u32 va)
}
}
static MMU_Lvl2_Table *MMU_CoarseFix(u32 va)
static mmuLevel2Table *mmuCoarseFix(u32 va)
{
enum DescriptorType type;
MMU_Lvl2_Table *coarsepd;
u32 type;
mmuLevel2Table *coarsepd;
type = MMU_WalkTT(va);
type = mmuWalkTT(va);
switch(type) {
case L1_UNMAPPED:
coarsepd = Alloc_Lvl2();
if (coarsepd != NULL)
MMU_Lvl1_TT.desc[L1_VA_IDX(va)] = (u32)coarsepd | DESCRIPTOR_L1_COARSE;
coarsepd = mmuAllocateLevel2Table();
mmuGlobalTT.desc[L1_VA_IDX(va)] = (u32)coarsepd | DESCRIPTOR_L1_COARSE;
break;
case L2_UNMAPPED:
coarsepd = (MMU_Lvl2_Table*)(MMU_Lvl1_TT.desc[L1_VA_IDX(va)] & COARSE_MASK);
coarsepd = (mmuLevel2Table*)(mmuGlobalTT.desc[L1_VA_IDX(va)] & COARSE_MASK);
break;
default:
@ -196,122 +180,91 @@ static MMU_Lvl2_Table *MMU_CoarseFix(u32 va)
/* Sections */
static u32 MMU_SectionFlags(u32 f)
{
return (MMU_GetShared(f) << 16) | (MMU_GetTEX(f) << 12) |
(MMU_GetAP(f) << 10) | (MMU_GetNX(f) << 4) |
(MMU_GetCB(f) << 2) | DESCRIPTOR_L1_SECTION;
static u32 mmuSectionFlags(u32 f)
{ // converts the internal format to the hardware L1 section format
return (mmuGetShared(f) << 16) | (mmuGetTEX(f) << 12) |
(mmuGetAP(f) << 10) | (mmuGetNX(f) << 4) |
(mmuGetCB(f) << 2) | DESCRIPTOR_L1_SECTION;
}
static bool MMU_MapSection(u32 va, u32 pa, u32 flags)
static void mmuMapSection(u32 va, u32 pa, u32 flags)
{
enum DescriptorType type = MMU_WalkTT(va);
if (type == L1_UNMAPPED) {
MMU_Lvl1_TT.desc[L1_VA_IDX(va)] = pa | MMU_SectionFlags(flags);
return true;
}
return false;
}
/* Large Pages */
static u32 MMU_LargePageFlags(u32 f)
{
return (MMU_GetNX(f) << 15) | (MMU_GetTEX(f) << 12) |
(MMU_GetShared(f) << 10) | (MMU_GetAP(f) << 4) |
(MMU_GetCB(f) << 2) | DESCRIPTOR_L2_LARGEPAGE;
}
static bool MMU_MapLargePage(u32 va, u32 pa, u32 flags)
{
MMU_Lvl2_Table *l2 = MMU_CoarseFix(va);
if (l2 == NULL)
return false;
for (u32 i = va; i < (va + 0x10000); i += 0x1000)
l2->desc[L2_VA_IDX(i)] = pa | MMU_LargePageFlags(flags);
return true;
mmuGlobalTT.desc[L1_VA_IDX(va)] = pa | mmuSectionFlags(flags);
}
/* Pages */
static u32 MMU_PageFlags(u32 f)
static u32 mmuPageFlags(u32 f)
{
return (MMU_GetShared(f) << 10) | (MMU_GetTEX(f) << 6) |
(MMU_GetAP(f) << 4) | (MMU_GetCB(f) << 2) |
(MMU_GetNX(f) ? DESCRIPTOR_L2_PAGE_NX : DESCRIPTOR_L2_PAGE_EXEC);
return (mmuGetShared(f) << 10) | (mmuGetTEX(f) << 6) |
(mmuGetAP(f) << 4) | (mmuGetCB(f) << 2) |
(mmuGetNX(f) ? DESCRIPTOR_L2_PAGE_NX : DESCRIPTOR_L2_PAGE_EXEC);
}
static bool MMU_MapPage(u32 va, u32 pa, u32 flags)
static void mmuMapPage(u32 va, u32 pa, u32 flags)
{
MMU_Lvl2_Table *l2 = MMU_CoarseFix(va);
if (l2 == NULL)
return false;
l2->desc[L2_VA_IDX(va)] = pa | MMU_PageFlags(flags);
return true;
mmuLevel2Table *l2 = mmuCoarseFix(va);
l2->desc[L2_VA_IDX(va)] = pa | mmuPageFlags(flags);
}
static bool MMU_MappingFits(u32 va, u32 pa, u32 len, u32 abits)
static bool mmuMappingFits(u32 va, u32 pa, u32 sz, u32 alignment)
{
return !((va | pa | len) & (BIT(abits) - 1));
return !((va | pa | sz) & (alignment));
}
u32 MMU_Map(u32 va, u32 pa, u32 size, u32 flags)
u32 mmuMapArea(u32 va, u32 pa, u32 size, u32 flags)
{
static const struct {
u32 bits;
bool (*mapfn)(u32,u32,u32);
u32 size;
void (*mapfn)(u32,u32,u32);
} VMappers[] = {
{
.bits = SECT_ADDR_SHIFT,
.mapfn = MMU_MapSection,
.size = BIT(SECT_ADDR_SHIFT),
.mapfn = mmuMapSection,
},
{
.bits = LPAGE_ADDR_SHIFT,
.mapfn = MMU_MapLargePage,
},
{
.bits = PAGE_ADDR_SHIFT,
.mapfn = MMU_MapPage,
.size = BIT(PAGE_ADDR_SHIFT),
.mapfn = mmuMapPage,
},
};
while(size > 0) {
size_t i = 0;
for (i = 0; i < countof(VMappers); i++) {
u32 abits = VMappers[i].bits;
u32 pgsize = VMappers[i].size;
if (MMU_MappingFits(va, pa, size, abits)) {
bool mapped = (VMappers[i].mapfn)(va, pa, flags);
u32 offset = BIT(abits);
if (mmuMappingFits(va, pa, size, pgsize-1)) {
(VMappers[i].mapfn)(va, pa, flags);
// no fun allowed
if (!mapped)
return size;
va += offset;
pa += offset;
size -= offset;
va += pgsize;
pa += pgsize;
size -= pgsize;
break;
}
}
/* alternatively return the unmapped remaining size:
if (i == countof(VMappers))
return size;
*/
}
return 0;
}
void MMU_Init(void)
void mmuInvalidate(void)
{
u32 ttbr0 = (u32)(&MMU_Lvl1_TT) | 0x12;
ARM_MCR(p15, 0, 0, c8, c7, 0);
}
void mmuInvalidateVA(u32 addr)
{
ARM_MCR(p15, 0, addr, c8, c7, 2);
}
void mmuInitRegisters(void)
{
u32 ttbr0 = (u32)(&mmuGlobalTT) | 0x12;
// Set up TTBR0/1 and the TTCR
ARM_MCR(p15, 0, ttbr0, c2, c0, 0);

View File

@ -20,21 +20,22 @@
#include <types.h>
enum MMU_MemoryType {
STRONGLY_ORDERED = 0,
NON_CACHEABLE,
DEVICE_SHARED,
DEVICE_NONSHARED,
CACHED_WT,
CACHED_WB,
CACHED_WB_ALLOC,
MEMORY_TYPES,
enum {
MMU_STRONG_ORDER = 0,
MMU_UNCACHEABLE,
MMU_DEV_SHARED,
MMU_DEV_NONSHARED,
MMU_CACHE_WT,
MMU_CACHE_WB,
MMU_CACHE_WBA,
MMU_MEMORY_TYPES,
};
enum MMU_MemoryAccess {
NO_ACCESS = 0,
READ_ONLY,
READ_WRITE,
enum {
MMU_NO_ACCESS = 0,
MMU_READ_ONLY,
MMU_READ_WRITE,
MMU_ACCESS_TYPES,
};
#define MMU_FLAGS(t, ap, nx, s) ((s) << 25 | (nx) << 24 | (ap) << 8 | (t))
@ -45,5 +46,9 @@ enum MMU_MemoryAccess {
#define MMU_FLAGS_NOEXEC(f) ((f) & BIT(24))
#define MMU_FLAGS_SHARED(f) ((f) & BIT(25))
u32 MMU_Map(u32 va, u32 pa, u32 size, u32 flags);
void MMU_Init(void);
u32 mmuMapArea(u32 va, u32 pa, u32 size, u32 flags);
void mmuInvalidate(void);
void mmuInvalidateVA(u32 addr); // DO NOT USE
void mmuInitRegisters(void);

View File

@ -109,7 +109,7 @@ XRQ_IRQ:
sub sp, sp, r4
mov lr, pc
ldr pc, =GIC_MainHandler
ldr pc, =gicTopHandler
add sp, sp, r4

View File

@ -185,14 +185,15 @@ void __attribute__((noreturn)) MainLoop(void)
// clear up the shared memory section
memset(&SharedMemoryState, 0, sizeof(SharedMemoryState));
// enable PXI RX interrupt
GIC_Enable(PXI_RX_INTERRUPT, BIT(0), GIC_HIGHEST_PRIO + 2, PXI_RX_Handler);
// configure interrupts
gicSetInterruptConfig(PXI_RX_INTERRUPT, BIT(0), GIC_PRIO2, GIC_RISINGEDGE_1N, PXI_RX_Handler);
gicSetInterruptConfig(MCU_INTERRUPT, BIT(0), GIC_PRIO1, GIC_RISINGEDGE_1N, MCU_HandleInterrupts);
gicSetInterruptConfig(VBLANK_INTERRUPT, BIT(0), GIC_PRIO0, GIC_RISINGEDGE_1N, VBlank_Handler);
// enable MCU interrupts
GIC_Enable(MCU_INTERRUPT, BIT(0), GIC_HIGHEST_PRIO + 1, MCU_HandleInterrupts);
// set up VBlank interrupt to always have the highest priority
GIC_Enable(VBLANK_INTERRUPT, BIT(0), GIC_HIGHEST_PRIO, VBlank_Handler);
// enable interrupts
gicEnableInterrupt(PXI_RX_INTERRUPT);
gicEnableInterrupt(MCU_INTERRUPT);
gicEnableInterrupt(VBLANK_INTERRUPT);
// ARM9 won't try anything funny until this point
PXI_Barrier(ARM11_READY_BARRIER);

View File

@ -57,18 +57,20 @@ static void SYS_EnableClkMult(void)
// state might get a bit messed up so it has to be done
// as early as possible in the initialization chain
if (SYS_IsNewConsole() && !SYS_ClkMultEnabled()) {
GIC_Enable(88, BIT(0), GIC_HIGHEST_PRIO, NULL);
gicSetInterruptConfig(88, BIT(0), GIC_PRIO_HIGHEST, GIC_RISINGEDGE_1N, NULL);
gicEnableInterrupt(88);
*CFG11_MPCORE_CLKCNT = 0x8001;
do {
ARM_WFI();
} while(!(*CFG11_MPCORE_CLKCNT & 0x8000));
GIC_Disable(88, BIT(0));
gicDisableInterrupt(88);
gicClearInterruptConfig(88);
}
}
void SYS_CoreZeroInit(void)
{
GIC_GlobalReset();
gicGlobalReset();
*LEGACY_BOOT_ENTRYPOINT = 0;
@ -77,27 +79,27 @@ void SYS_CoreZeroInit(void)
SCU_Init();
// Map all sections here
MMU_Map(SECTION_TRI(vector), MMU_FLAGS(CACHED_WT, READ_ONLY, 0, 0));
MMU_Map(SECTION_TRI(text), MMU_FLAGS(CACHED_WT, READ_ONLY, 0, 1));
MMU_Map(SECTION_TRI(data), MMU_FLAGS(CACHED_WB_ALLOC, READ_WRITE, 1, 1));
MMU_Map(SECTION_TRI(rodata), MMU_FLAGS(CACHED_WT, READ_ONLY, 1, 1));
MMU_Map(SECTION_TRI(bss), MMU_FLAGS(CACHED_WB_ALLOC, READ_WRITE, 1, 1));
MMU_Map(SECTION_TRI(shared), MMU_FLAGS(STRONGLY_ORDERED, READ_WRITE, 1, 1));
mmuMapArea(SECTION_TRI(vector), MMU_FLAGS(MMU_CACHE_WT, MMU_READ_ONLY, 0, 0));
mmuMapArea(SECTION_TRI(text), MMU_FLAGS(MMU_CACHE_WT, MMU_READ_ONLY, 0, 1));
mmuMapArea(SECTION_TRI(data), MMU_FLAGS(MMU_CACHE_WBA, MMU_READ_WRITE, 1, 1));
mmuMapArea(SECTION_TRI(rodata), MMU_FLAGS(MMU_CACHE_WT, MMU_READ_ONLY, 1, 1));
mmuMapArea(SECTION_TRI(bss), MMU_FLAGS(MMU_CACHE_WBA, MMU_READ_WRITE, 1, 1));
mmuMapArea(SECTION_TRI(shared), MMU_FLAGS(MMU_STRONG_ORDER, MMU_READ_WRITE, 1, 1));
// IO Registers
MMU_Map(0x10100000, 0x10100000, 4UL << 20, MMU_FLAGS(DEVICE_SHARED, READ_WRITE, 1, 1));
mmuMapArea(0x10100000, 0x10100000, 4UL << 20, MMU_FLAGS(MMU_DEV_SHARED, MMU_READ_WRITE, 1, 1));
// MPCore Private Memory Region
MMU_Map(0x17E00000, 0x17E00000, 8UL << 10, MMU_FLAGS(DEVICE_SHARED, READ_WRITE, 1, 1));
mmuMapArea(0x17E00000, 0x17E00000, 8UL << 10, MMU_FLAGS(MMU_DEV_SHARED, MMU_READ_WRITE, 1, 1));
// VRAM
MMU_Map(0x18000000, 0x18000000, 6UL << 20, MMU_FLAGS(CACHED_WT, READ_WRITE, 1, 1));
mmuMapArea(0x18000000, 0x18000000, 6UL << 20, MMU_FLAGS(MMU_CACHE_WT, MMU_READ_WRITE, 1, 1));
// FCRAM
if (SYS_IsNewConsole()) {
MMU_Map(0x20000000, 0x20000000, 256UL << 20, MMU_FLAGS(CACHED_WB, READ_WRITE, 1, 1));
mmuMapArea(0x20000000, 0x20000000, 256UL << 20, MMU_FLAGS(MMU_CACHE_WB, MMU_READ_WRITE, 1, 1));
} else {
MMU_Map(0x20000000, 0x20000000, 128UL << 20, MMU_FLAGS(CACHED_WB, READ_WRITE, 1, 1));
mmuMapArea(0x20000000, 0x20000000, 128UL << 20, MMU_FLAGS(MMU_CACHE_WB, MMU_READ_WRITE, 1, 1));
}
// Initialize peripherals
@ -115,10 +117,10 @@ void SYS_CoreZeroInit(void)
void SYS_CoreInit(void)
{
// Reset local GIC registers
GIC_LocalReset();
gicLocalReset();
// Set up MMU registers
MMU_Init();
mmuInitRegisters();
// Enable fancy ARM11 features
ARM_SetACR(ARM_GetACR() |
@ -135,7 +137,7 @@ void SYS_CoreInit(void)
void SYS_CoreZeroShutdown(void)
{
ARM_DisableInterrupts();
GIC_GlobalReset();
gicGlobalReset();
}
void __attribute__((noreturn)) SYS_CoreShutdown(void)
@ -144,7 +146,7 @@ void __attribute__((noreturn)) SYS_CoreShutdown(void)
ARM_DisableInterrupts();
GIC_LocalReset();
gicLocalReset();
ARM_WbInvDC();
ARM_InvIC();