// See LICENSE.SiFive for license details.
package uncore.devices
import Chisel._
import Chisel.ImplicitConversions._
import junctions._
import diplomacy._
import regmapper._
import uncore.tilelink2._
import config._
import scala.math.min
import tile.XLen
class GatewayPLICIO extends Bundle {
  val valid = Bool(OUTPUT)
  val ready = Bool(INPUT)
  val complete = Bool(INPUT)
}
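
// Wraps a level-sensitive interrupt line in the PLIC's valid/ready/complete
// handshake: once the PLIC accepts the interrupt (valid && ready), the gateway
// holds it back until software signals completion, so each assertion is
// forwarded at most once.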
class LevelGateway extends Module {
  val io = new Bundle {
    val interrupt = Bool(INPUT)
    val plic = new GatewayPLICIO
  }

  val inFlight = Reg(init=Bool(false))
  when (io.interrupt && io.plic.ready) { inFlight := true }
  when (io.plic.complete) { inFlight := false }
  io.plic.valid := io.interrupt && !inFlight
}
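
// Offsets within the PLIC's memory-mapped register space:
//   priorityBase: one 32-bit priority register per interrupt source
//   pendingBase:  pending bits, packed one per source
//   enableBase:   per-context enable bits, one block per context
//   hartBase:     per-context threshold (+0) and claim/complete (+claimOffset) registers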
object PLICConsts
{
  def maxDevices = 1023
  def maxHarts = 15872
  def priorityBase = 0x0
  def pendingBase = 0x1000
  def enableBase = 0x2000
  def hartBase = 0x200000

  def claimOffset = 4
  def priorityBytes = 4

  def enableOffset(i: Int) = i * ((maxDevices+7)/8)
  def hartOffset(i: Int) = i * 0x1000
  def enableBase(i: Int):Int = enableOffset(i) + enableBase
  def hartBase(i: Int):Int = hartOffset(i) + hartBase

  def size = hartBase(maxHarts)
  require(hartBase >= enableBase(maxHarts))
}
/** Platform-Level Interrupt Controller */
class TLPLIC(supervisor: Boolean, maxPriorities: Int, address: BigInt = 0xC000000)(implicit p: Parameters) extends LazyModule
{
  val contextsPerHart = if (supervisor) 2 else 1
  require (maxPriorities >= 0)

  // plic0 => max devices 1023
  val device = new SimpleDevice("interrupt-controller", Seq("riscv,plic0")) {
    override val alwaysExtended = true
    override def describe(resources: ResourceBindings): Description = {
      val Description(name, mapping) = super.describe(resources)
      val extra = Map(
        "interrupt-controller" -> Nil,
        "riscv,ndev" -> Seq(ResourceInt(nDevices)),
        "#interrupt-cells" -> Seq(ResourceInt(1)),
        "#address-cells" -> Seq(ResourceInt(0)))
      Description(name, mapping ++ extra)
    }
  }

  val node = TLRegisterNode(
    address   = AddressSet(address, PLICConsts.size-1),
    device    = device,
    beatBytes = p(XLen)/8,
    undefZero = false)
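
  // Interrupt nexus: device interrupt sources connect on the input side;
  // each output port drives contextsPerHart interrupt lines (M, plus S when
  // supervisor mode is enabled) toward a hart.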
  val intnode = IntNexusNode(
    numSourcePorts = 0 to 1024,
    numSinkPorts   = 0 to 1024,
    sourceFn       = { _ => IntSourcePortParameters(Seq(IntSourceParameters(contextsPerHart, Seq(Resource(device, "int"))))) },
    sinkFn         = { _ => IntSinkPortParameters(Seq(IntSinkParameters())) })
  /* Negotiated sizes */
  def nDevices: Int = intnode.edgesIn.map(_.source.num).sum
  def nPriorities = min(maxPriorities, nDevices)
  def nHarts = intnode.edgesOut.map(_.source.num).sum
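
  // Context numbering: each hart owns contextsPerHart consecutive contexts,
  // M-mode at the even index and (when supervisor mode is enabled) S-mode at the odd index.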
  def context(i: Int, mode: Char) = mode match {
    case 'M' => i * contextsPerHart
    case 'S' => require(supervisor); i * contextsPerHart + 1
  }
  def claimAddr(i: Int, mode: Char) = address + PLICConsts.hartBase(context(i, mode)) + PLICConsts.claimOffset
  def threshAddr(i: Int, mode: Char) = address + PLICConsts.hartBase(context(i, mode))
  def enableAddr(i: Int, mode: Char) = address + PLICConsts.enableBase(context(i, mode))
  // Create the global PLIC config string
  lazy val globalConfigString = Seq(
    s"plic {\n",
    s" priority 0x${address.toString(16)};\n",
    s" pending 0x${(address + PLICConsts.pendingBase).toString(16)};\n",
    s" ndevs ${nDevices};\n",
    s"};\n").mkString

  // Create the per-Hart config string
  lazy val hartConfigStrings = Seq.tabulate(intnode.edgesOut.size) { i => (Seq(
    s" plic {\n",
    s"  m {\n",
    s"   ie 0x${enableAddr(i, 'M').toString(16)};\n",
    s"   thresh 0x${threshAddr(i, 'M').toString(16)};\n",
    s"   claim 0x${claimAddr(i, 'M').toString(16)};\n",
    s"  };\n") ++ (if (!supervisor) Seq() else Seq(
    s"  s {\n",
    s"   ie 0x${enableAddr(i, 'S').toString(16)};\n",
    s"   thresh 0x${threshAddr(i, 'S').toString(16)};\n",
    s"   claim 0x${claimAddr(i, 'S').toString(16)};\n",
    s"  };\n")) ++ Seq(
    s" };\n")).mkString
  }
  // Assign all the devices unique ranges
  lazy val sources = intnode.edgesIn.map(_.source)
  lazy val flatSources = (sources zip sources.map(_.num).scanLeft(0)(_+_).init).map {
    case (s, o) => s.sources.map(z => z.copy(range = z.range.offset(o)))
  }.flatten
  ResourceBinding {
    flatSources.foreach { s => s.resources.foreach { r =>
      // +1 because interrupt 0 is reserved
      (s.range.start until s.range.end).foreach { i => r.bind(device, ResourceInt(i+1)) }
    } }
  }
  lazy val module = new LazyModuleImp(this) {
    val io = new Bundle {
      val tl_in = node.bundleIn
      val devices = intnode.bundleIn
      val harts = intnode.bundleOut
    }
    // Compact the interrupt vector the same way
    val interrupts = (intnode.edgesIn zip io.devices).map { case (e, i) => i.take(e.source.num) }.flatten
    // This flattens the harts into an MSMSMSMSMS... or MMMMM.... sequence
    val harts = io.harts.flatten

    println("\nInterrupt map:")
    flatSources.foreach { s =>
      // +1 because 0 is reserved, +1-1 because the range is half-open
      println(s" [${s.range.start+1}, ${s.range.end}] => ${s.name}")
    }

    require (nDevices == interrupts.size)
    require (nHarts == harts.size)
    require(nDevices <= PLICConsts.maxDevices)
    require(nHarts > 0 && nHarts <= PLICConsts.maxHarts)
    // For now, use LevelGateways for all TL2 interrupts
    val gateways = Vec(interrupts.map { case i =>
      val gateway = Module(new LevelGateway)
      gateway.io.interrupt := i
      gateway.io.plic
    } ++ (if (interrupts.isEmpty) Some(Wire(new GatewayPLICIO)) else None))
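
    // With no programmable priorities, priority and threshold collapse to
    // constants (every source at priority 1, threshold 0), so no state is needed.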
    val priority =
      if (nPriorities > 0) Reg(Vec(nDevices+1, UInt(width=log2Up(nPriorities+1))))
      else Wire(init=Vec.fill(nDevices+1)(UInt(1)))
    val threshold =
      if (nPriorities > 0) Reg(Vec(nHarts, UInt(width = log2Up(nPriorities+1))))
      else Wire(init=Vec.fill(nHarts)(UInt(0)))
    val pending = Reg(init=Vec.fill(nDevices+1){Bool(false)})
    val enables = Reg(Vec(nHarts, Vec(nDevices+1, Bool())))

    for ((p, g) <- pending.tail zip gateways) {
      g.ready := !p
      g.complete := false
      when (g.valid) { p := true }
    }
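
    // Balanced comparator tree over a sequence of values; returns the maximum
    // value together with the index at which it was found (ties favor the left).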
    def findMax(x: Seq[UInt]): (UInt, UInt) = {
      if (x.length > 1) {
        val half = 1 << (log2Ceil(x.length) - 1)
        val lMax = findMax(x take half)
        val rMax = findMax(x drop half)
        val useLeft = lMax._1 >= rMax._1
        (Mux(useLeft, lMax._1, rMax._1), Mux(useLeft, lMax._2, UInt(half) | rMax._2))
      } else (x.head, UInt(0))
    }
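
    // For each context, concatenate {pending && enabled, priority} for every
    // source and pick the maximum: the winning index is latched in maxDevs for
    // the claim register, and the external interrupt fires when the registered
    // maximum exceeds {1, threshold}.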
    val maxDevs = Reg(Vec(nHarts, UInt(width = log2Up(pending.size))))
    for (hart <- 0 until nHarts) {
      val effectivePriority =
        for (((p, en), pri) <- (pending zip enables(hart) zip priority).tail)
          yield Cat(p && en, pri)
      val (maxPri, maxDev) = findMax((UInt(1) << priority(0).getWidth) +: effectivePriority)

      maxDevs(hart) := maxDev
      harts(hart) := Reg(next = maxPri) > Cat(UInt(1), threshold(hart))
    }
    def priorityRegField(x: UInt) = if (nPriorities > 0) RegField(32, x) else RegField.r(32, x)
    val priorityRegFields = Seq(PLICConsts.priorityBase -> priority.map(p => priorityRegField(p)))
    val pendingRegFields = Seq(PLICConsts.pendingBase -> pending .map(b => RegField.r(1, b)))
    val enableRegFields = enables.zipWithIndex.map { case (e, i) =>
      PLICConsts.enableBase(i) -> e.map(b => RegField(1, b))
    }
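
    // Per-context register block: a priority threshold at offset 0 and a
    // claim/complete register at claimOffset. Reading claim returns the
    // highest-priority pending source and clears its pending bit; writing a
    // source number back signals completion to that source's gateway
    // (only if the source is enabled for this context).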
    val hartRegFields = Seq.tabulate(nHarts) { i =>
      PLICConsts.hartBase(i) -> Seq(
        priorityRegField(threshold(i)),
        RegField(32,
          RegReadFn { valid =>
            when (valid) {
              pending(maxDevs(i)) := Bool(false)
              maxDevs(i) := UInt(0) // flush pipeline
            }
            (Bool(true), maxDevs(i))
          },
          RegWriteFn { (valid, data) =>
            when (valid && enables(i)(data)) {
              gateways(data - UInt(1)).complete := Bool(true)
            }
            Bool(true)
          }
        )
      )
    }
    node.regmap((priorityRegFields ++ pendingRegFields ++ enableRegFields ++ hartRegFields):_*)
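
    // Interrupt 0 is reserved: hardwire its priority, pending, and enable bits to zero.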
    priority(0) := 0
    pending(0) := false
    for (e <- enables)
      e(0) := false
  }
}
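
// Usage sketch: instantiating the PLIC inside a LazyModule-based design of this
// vintage. The surrounding names (periphBus, deviceIntNode, hartIntSink) are
// hypothetical placeholder nodes assumed to exist in the enclosing design.
//
//   val plic = LazyModule(new TLPLIC(supervisor = true, maxPriorities = 7))
//   plic.node    := periphBus.node      // MMIO access to the PLIC registers
//   plic.intnode := deviceIntNode       // device interrupts into the PLIC
//   hartIntSink  := plic.intnode        // per-context interrupts out to a hart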