
WIP on new memory map

This commit is contained in:
Andrew Waterman
2016-04-27 14:57:54 -07:00
parent 48170fd9aa
commit 1f211b37df
17 changed files with 110 additions and 466 deletions

View File

@@ -25,24 +25,23 @@ class DefaultConfig extends Config (
type PF = PartialFunction[Any,Any]
def findBy(sname:Any):Any = here[PF](site[Any](sname))(pname)
def genCsrAddrMap: AddrMap = {
val deviceTree = AddrMapEntry("devicetree", None, MemSize(1 << 15, AddrMapConsts.R))
val csrSize = (1 << 12) * (site(XLen) / 8)
val csrs = (0 until site(NTiles)).map{ i =>
AddrMapEntry(s"csr$i", None, MemSize(csrSize, AddrMapConsts.RW))
}
val scrSize = site(HtifKey).nSCR * (site(XLen) / 8)
val scr = AddrMapEntry("scr", None, MemSize(scrSize, AddrMapConsts.RW))
new AddrMap(deviceTree +: csrs :+ scr)
val deviceTree = AddrMapEntry("devicetree", MemSize(1 << 15, AddrMapConsts.R))
val rtc = AddrMapEntry("rtc", MemSize(1 << 12, AddrMapConsts.RW))
new AddrMap(Seq(deviceTree, rtc))
}
def makeConfigString() = {
val addrMap = new AddrHashMap(site(GlobalAddrMap))
val xLen = site(XLen)
val res = new StringBuilder
val memSize = addrMap(s"mem").size
val memSize = addrMap("mem").size
val rtcAddr = addrMap("conf:rtc").start
res append "platform {\n"
res append " vendor ucb;\n"
res append " arch rocket;\n"
res append "};\n"
res append "rtc {\n"
res append s" addr 0x${rtcAddr.toString(16)};\n"
res append "};\n"
res append "ram {\n"
res append " 0 {\n"
res append " addr 0;\n"
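Throughout this commit, AddrMapEntry loses its middle Option[BigInt] fixed-start argument, which was None at every call site here. A before/after sketch of the call shape (the constructor signature itself is inferred from the call sites, not shown in this diff):

    AddrMapEntry("devicetree", None, MemSize(1 << 15, AddrMapConsts.R)) // before
    AddrMapEntry("devicetree",       MemSize(1 << 15, AddrMapConsts.R)) // after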
@@ -51,11 +50,11 @@ class DefaultConfig extends Config (
res append "};\n"
res append "core {\n"
for (i <- 0 until site(NTiles)) {
val csrAddr = addrMap(s"conf:csr$i").start
val timecmpAddr = rtcAddr + 8*(i+1)
res append s" $i {\n"
res append " 0 {\n"
res append s" isa rv$xLen;\n"
res append s" addr 0x${csrAddr.toString(16)};\n"
res append s" timecmp 0x${timecmpAddr.toString(16)};\n"
res append " };\n"
res append " };\n"
}
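For concreteness, a sketch of what makeConfigString now emits, assuming XLen = 64, NTiles = 1, and the conf region based at 0x40000000 with entries packed in declaration order (putting rtc at 0x40008000, just past the 32 KiB devicetree); the ram size line falls between hunks and is left elided:

    platform {
      vendor ucb;
      arch rocket;
    };
    rtc {
      addr 0x40008000;
    };
    ram {
      0 {
        addr 0;
        ...
      };
    };
    core {
      0 {
        0 {
          isa rv64;
          timecmp 0x40008008;
        };
      };
    };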
@@ -187,7 +186,7 @@ class DefaultConfig extends Config (
case LNEndpoints => site(TLKey(site(TLId))).nManagers + site(TLKey(site(TLId))).nClients
case LNHeaderBits => log2Ceil(site(TLKey(site(TLId))).nManagers) +
log2Up(site(TLKey(site(TLId))).nClients)
case ExtraL1Clients => 2 // RTC and HTIF
case ExtraL1Clients => 1 // HTIF // TODO not really a parameter
case TLKey("L1toL2") =>
TileLinkParameters(
coherencePolicy = new MESICoherence(site(L2DirectoryRepresentation)),
@@ -201,9 +200,7 @@ class DefaultConfig extends Config (
// L1 cache
site(NMSHRs) + 1,
// RoCC
if (site(BuildRoCC).isEmpty) 1 else site(RoccMaxTaggedMemXacts),
// RTC
site(NTiles)),
if (site(BuildRoCC).isEmpty) 1 else site(RoccMaxTaggedMemXacts)),
maxClientsPerPort = if (site(BuildRoCC).isEmpty) 1 else 2,
maxManagerXacts = site(NAcquireTransactors) + 2,
dataBits = site(CacheBlockBytes)*8)
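With the RTC no longer a TileLink client, its site(NTiles) term drops out of the transaction budget. A self-contained sketch of the surviving arithmetic under assumed defaults (NMSHRs = 2, BuildRoCC empty); the enclosing reduction sits above this hunk and is assumed to take the max of the listed terms:

    val nMSHRs = 2                                   // assumed site(NMSHRs)
    val l1Term = nMSHRs + 1                          // L1 cache term
    val roccTerm = 1                                 // BuildRoCC.isEmpty
    val maxClientXacts = List(l1Term, roccTerm).max  // = 3; no NTiles term anymore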
@@ -245,18 +242,15 @@ class DefaultConfig extends Config (
case BankIdLSB => 0
case CacheBlockBytes => Dump("CACHE_BLOCK_BYTES", 64)
case CacheBlockOffsetBits => log2Up(here(CacheBlockBytes))
case UseBackupMemoryPort => false
case UseHtifClockDiv => true
case ConfigString => makeConfigString()
case GlobalAddrMap => {
val memsize = BigInt(1L << 30)
Dump("MEM_SIZE", memsize)
AddrMap(
AddrMapEntry("mem", None, MemSize(memsize, AddrMapConsts.RWX, true)),
AddrMapEntry("conf", None,
MemSubmap(BigInt(1L << 30), genCsrAddrMap)),
AddrMapEntry("devices", None,
MemSubmap(BigInt(1L << 31), site(GlobalDeviceSet).getAddrMap)))
AddrMapEntry("mem", MemSize(memsize, AddrMapConsts.RWX, true)),
AddrMapEntry("conf", MemSubmap(BigInt(1L << 30), genCsrAddrMap)),
AddrMapEntry("devices", MemSubmap(BigInt(1L << 31), site(GlobalDeviceSet).getAddrMap)))
}
case GlobalDeviceSet => {
val devset = new DeviceSet
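Assuming the three GlobalAddrMap entries pack in declaration order from address 0 (the "Generated Address Map" printed by OuterMemorySystem below shows the real assignment), the resulting top-level layout is roughly:

    0x00000000  mem      1 GiB  RWX, cacheable (dumped as MEM_SIZE)
    0x40000000  conf     1 GiB  devicetree + rtc submap
    0x80000000  devices  2 GiB  DeviceSet submap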

View File

@@ -29,7 +29,7 @@ class DeviceSet {
val entries = devices.map { case Device(name, size, _, readable, writeable) =>
val prot = (if (readable) R else 0) | (if (writeable) W else 0)
val realsize = roundup(size)
new AddrMapEntry(name, None, new MemSize(realsize, prot))
new AddrMapEntry(name, new MemSize(realsize, prot))
}
new AddrMap(entries)
}
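The prot field is a plain bitmask; a minimal self-contained sketch of the encoding, with R = 1 and W = 2 assumed to match the AddrMapConsts values implied by R | W:

    val R = 1; val W = 2                        // assumed AddrMapConsts encodings
    def prot(readable: Boolean, writeable: Boolean): Int =
      (if (readable) R else 0) | (if (writeable) W else 0)
    assert(prot(true, true) == (R | W))         // read-write device
    assert(prot(true, false) == R)              // read-only device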

View File

@@ -25,8 +25,6 @@ case object MemoryChannelMuxConfigs extends Field[List[Int]]
case object BankIdLSB extends Field[Int]
/** Number of outstanding memory requests */
case object NOutstandingMemReqsPerChannel extends Field[Int]
/** Whether to use the slow backup memory port [VLSI] */
case object UseBackupMemoryPort extends Field[Boolean]
/** Whether to divide HTIF clock */
case object UseHtifClockDiv extends Field[Boolean]
/** Function for building some kind of coherence manager agent */
@@ -78,7 +76,6 @@ class MemBackupCtrlIO extends Bundle {
class BasicTopIO(implicit val p: Parameters) extends ParameterizedBundle()(p)
with HasTopLevelParameters {
val host = new HostIO(htifW)
val mem_backup_ctrl = new MemBackupCtrlIO
}
class TopIO(implicit p: Parameters) extends BasicTopIO()(p) {
@@ -95,6 +92,11 @@ object TopUtils {
inner.r <> Queue(outer.r, mifDataBeats)
inner.b <> Queue(outer.b)
}
def connectTilelinkNasti(nasti: NastiIO, tl: ClientUncachedTileLinkIO)(implicit p: Parameters) = {
val conv = Module(new NastiIOTileLinkIOConverter())
conv.io.tl <> tl
TopUtils.connectNasti(nasti, conv.io.nasti)
}
}
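connectTilelinkNasti packages the converter-plus-queue idiom (a NastiIOTileLinkIOConverter feeding connectNasti) so each MMIO hookup shrinks to one line. A usage sketch with illustrative names (devNasti is a stand-in; the pattern matches the loopback hookup later in this commit):

    val devPort = addrHashMap("devices:loopback").port - 1
    TopUtils.connectTilelinkNasti(devNasti, mmioNetwork.io.out(devPort))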
/** Top-level module for the chip */
@@ -111,19 +113,17 @@ class Top(topParams: Parameters) extends Module with HasTopLevelParameters {
// Connect each tile to the HTIF
uncore.io.htif.zip(tileList).zipWithIndex.foreach {
case ((hl, tile), i) =>
tile.io.host.timerIRQ := uncore.io.timerIRQs(i)
tile.io.host.id := UInt(i)
tile.io.host.reset := Reg(next=Reg(next=hl.reset))
tile.io.host.csr.req <> Queue(hl.csr.req)
hl.csr.resp <> Queue(tile.io.host.csr.resp)
hl.debug_stats_csr := tile.io.host.debug_stats_csr
}
// Connect the uncore to the tile memory ports, HostIO and MemIO
uncore.io.tiles_cached <> tileList.map(_.io.cached).flatten
uncore.io.tiles_uncached <> tileList.map(_.io.uncached).flatten
io.host <> uncore.io.host
if (p(UseBackupMemoryPort)) { io.mem_backup_ctrl <> uncore.io.mem_backup_ctrl }
else { uncore.io.mem_backup_ctrl.en := Bool(false) }
io.mem.zip(uncore.io.mem).foreach { case (outer, inner) =>
TopUtils.connectNasti(outer, inner)
@@ -146,7 +146,7 @@ class Uncore(implicit val p: Parameters) extends Module
val tiles_cached = Vec(nCachedTilePorts, new ClientTileLinkIO).flip
val tiles_uncached = Vec(nUncachedTilePorts, new ClientUncachedTileLinkIO).flip
val htif = Vec(nTiles, new HtifIO).flip
val mem_backup_ctrl = new MemBackupCtrlIO
val timerIRQs = Vec(nTiles, Bool()).asOutput
val mmio = new NastiIO
}
@@ -160,25 +160,20 @@ class Uncore(implicit val p: Parameters) extends Module
for (i <- 0 until nTiles) {
io.htif(i).reset := htif.io.cpu(i).reset
io.htif(i).id := htif.io.cpu(i).id
htif.io.cpu(i).debug_stats_csr <> io.htif(i).debug_stats_csr
val csr_arb = Module(new SmiArbiter(2, xLen, csrAddrBits))
csr_arb.io.in(0) <> htif.io.cpu(i).csr
csr_arb.io.in(1) <> outmemsys.io.csr(i)
io.htif(i).csr <> csr_arb.io.out
io.htif(i).csr <> htif.io.cpu(i).csr
}
// Arbitrate SCR access between MMIO and HTIF
val addrHashMap = new AddrHashMap(p(GlobalAddrMap))
val scrFile = Module(new SCRFile("UNCORE_SCR",addrHashMap("conf:scr").start))
val scrArb = Module(new SmiArbiter(2, scrDataBits, scrAddrBits))
scrArb.io.in(0) <> htif.io.scr
scrArb.io.in(1) <> outmemsys.io.scr
scrFile.io.smi <> scrArb.io.out
val addrMap = p(GlobalAddrMap)
val addrHashMap = new AddrHashMap(addrMap)
val memSize = addrHashMap("mem").size
val scrFile = Module(new SCRFile("UNCORE_SCR", 0))
scrFile.io.smi <> htif.io.scr
scrFile.io.scr.attach(Wire(init = UInt(nTiles)), "N_CORES")
scrFile.io.scr.attach(Wire(init = UInt(addrHashMap("mem").size >> 20)), "MMIO_BASE")
scrFile.io.scr.attach(Wire(init = UInt(memSize >> 20)), "MMIO_BASE")
// scrFile.io.scr <> (... your SCR connections ...)
buildMMIONetwork(p.alterPartial({case TLId => "MMIO_Outermost"}))
// Configures the enabled memory channels. This can't be changed while the
// chip is actively using memory, as it both drops Nasti messages and garbles
// all of memory.
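MMIO_BASE is published in MiB units (memSize >> 20), i.e. the first address past cacheable memory. A quick check of the arithmetic for the 1 GiB default:

    val memSize = BigInt(1L << 30)   // the GlobalAddrMap default
    assert((memSize >> 20) == 1024)  // SCR reads back 1024 MiB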
@@ -187,19 +182,40 @@ class Uncore(implicit val p: Parameters) extends Module
"MEMORY_CHANNEL_MUX_SELECT")
outmemsys.io.memory_channel_mux_select := memory_channel_mux_select
val deviceTree = Module(new NastiROM(p(ConfigString).toSeq))
deviceTree.io <> outmemsys.io.deviceTree
// Wire the htif to the memory port(s) and host interface
io.host.debug_stats_csr := htif.io.host.debug_stats_csr
io.mem <> outmemsys.io.mem
if(p(UseHtifClockDiv)) {
outmemsys.io.mem_backup_en := io.mem_backup_ctrl.en
VLSIUtils.padOutHTIFWithDividedClock(htif.io.host, scrFile.io.scr,
outmemsys.io.mem_backup, io.mem_backup_ctrl, io.host, htifW)
VLSIUtils.padOutHTIFWithDividedClock(htif.io.host, scrFile.io.scr, io.host, htifW)
} else {
io.host.out <> htif.io.host.out
htif.io.host.in <> io.host.in
io.host <> htif.io.host
}
def buildMMIONetwork(implicit p: Parameters) = {
val mmioNarrower = Module(new TileLinkIONarrower("L2toMMIO", "MMIO_Outermost"))
val mmioNetwork = Module(new TileLinkRecursiveInterconnect(1, addrMap.tail, memSize))
mmioNarrower.io.in <> outmemsys.io.mmio
mmioNetwork.io.in.head <> mmioNarrower.io.out
if (p(UseStreamLoopback)) {
val lo_width = p(StreamLoopbackWidth)
val lo_size = p(StreamLoopbackSize)
val lo_conv = Module(new NastiIOStreamIOConverter(lo_width))
val lo_port = addrHashMap("devices:loopback").port - 1
TopUtils.connectTilelinkNasti(lo_conv.io.nasti, mmioNetwork.io.out(lo_port))
lo_conv.io.stream.in <> Queue(lo_conv.io.stream.out, lo_size)
}
val rtc = Module(new RTC(p(NTiles)))
val rtcAddr = addrHashMap("conf:rtc")
val rtcPort = rtcAddr.port - 1
require(rtc.size <= rtcAddr.size)
rtc.io.tl <> mmioNetwork.io.out(rtcPort)
io.timerIRQs := rtc.io.irqs
val deviceTree = Module(new ROMSlave(p(ConfigString).toSeq))
val dtPort = addrHashMap("conf:devicetree").port - 1
deviceTree.io <> mmioNetwork.io.out(dtPort)
}
}
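buildMMIONetwork fixes the pattern every MMIO slave now follows: resolve a port index from the AddrHashMap entry, then connect that interconnect output. A hedged sketch for a hypothetical device entry "devices:foo" (FooSlave and its io.tl are stand-ins; the rtc and devicetree hookups above are the real instances):

    val fooPort = addrHashMap("devices:foo").port - 1
    val foo = Module(new FooSlave)             // hypothetical TileLink slave
    foo.io.tl <> mmioNetwork.io.out(fooPort)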
@@ -213,25 +229,18 @@ class OuterMemorySystem(implicit val p: Parameters) extends Module with HasTopLevelParameters {
val htif_uncached = (new ClientUncachedTileLinkIO).flip
val incoherent = Vec(nTiles, Bool()).asInput
val mem = Vec(nMemChannels, new NastiIO)
val mem_backup = new MemSerializedIO(htifW)
val mem_backup_en = Bool(INPUT)
val memory_channel_mux_select = UInt(INPUT, log2Up(memoryChannelMuxConfigs.size))
val csr = Vec(nTiles, new SmiIO(xLen, csrAddrBits))
val scr = new SmiIO(xLen, scrAddrBits)
val deviceTree = new NastiIO
val mmio = new ClientUncachedTileLinkIO()(p.alterPartial({case TLId => "L2toMMIO"}))
}
val addrMap = p(GlobalAddrMap)
val addrHashMap = new AddrHashMap(addrMap)
val memSize = addrHashMap("mem").size
val addrHashMap = new AddrHashMap(p(GlobalAddrMap))
// Create a simple L1toL2 NoC between the tiles+htif and the banks of outer memory
// Cached ports are first in the client list, making sharerToClientId just an identity function
// addrToBank is used to hash physical addresses (of cache blocks) to banks (and thereby memory channels)
def sharerToClientId(sharerId: UInt) = sharerId
def addrToBank(addr: Bits): UInt = {
val isMemory = addrHashMap.isInRegion("mem",
addr.toUInt << log2Up(p(CacheBlockBytes)))
def addrToBank(addr: UInt): UInt = {
val isMemory = addrHashMap.isInRegion("mem", addr << log2Up(p(CacheBlockBytes)))
Mux(isMemory,
if (nBanks > 1) addr(lsb + log2Up(nBanks) - 1, lsb) else UInt(0),
UInt(nBanks))
@@ -248,13 +257,12 @@ class OuterMemorySystem(implicit val p: Parameters) extends Module with HasTopLevelParameters {
case InnerTLId => "L1toL2"
case OuterTLId => "L2toMMIO"
})))
val rtc = Module(new RTC(CSRs.mtime))
io.mmio <> mmioManager.io.outer
// Wire the tiles and htif to the TileLink client ports of the L1toL2 network,
// and coherence manager(s) to the other side
l1tol2net.io.clients_cached <> io.tiles_cached
l1tol2net.io.clients_uncached <> io.tiles_uncached ++ Seq(rtc.io, io.htif_uncached)
l1tol2net.io.clients_uncached <> io.tiles_uncached ++ Seq(io.htif_uncached)
l1tol2net.io.managers <> managerEndpoints.map(_.innerTL) :+ mmioManager.io.inner
// Create a converter between TileLinkIO and MemIO for each channel
@@ -262,7 +270,6 @@ class OuterMemorySystem(implicit val p: Parameters) extends Module with HasTopLevelParameters {
val outermostTLParams = p.alterPartial({case TLId => "Outermost"})
val backendBuffering = TileLinkDepths(0,0,0,0,0)
// TODO: the code to print this stuff should live somewhere else
println("Generated Address Map")
for ((name, base, size, _, _) <- addrHashMap.sortedEntries) {
@@ -295,67 +302,6 @@ class OuterMemorySystem(implicit val p: Parameters) extends Module with HasTopLevelParameters {
mem_ic.io.in(i) <> narrow.io.out
}
val mmioOutermostTLParams = p.alterPartial({case TLId => "MMIO_Outermost"})
val mmio_narrow = Module(new TileLinkIONarrower("L2toMMIO", "MMIO_Outermost"))
val mmio_net = Module(new TileLinkRecursiveInterconnect(
1, addrHashMap.nEntries - 1, addrMap.tail, memSize)(mmioOutermostTLParams))
//val mmio_conv = Module(new NastiIOTileLinkIOConverter()(outermostTLParams))
mmio_narrow.io.in <> mmioManager.io.outer
mmio_net.io.in.head <> mmio_narrow.io.out
def connectTilelinkNasti(nasti: NastiIO, tl: ClientUncachedTileLinkIO)(implicit p: Parameters) = {
val conv = Module(new NastiIOTileLinkIOConverter())
conv.io.tl <> tl
TopUtils.connectNasti(nasti, conv.io.nasti)
}
for (i <- 0 until nTiles) {
val csrName = s"conf:csr$i"
val csrPort = addrHashMap(csrName).port - 1
val conv = Module(new SmiIONastiIOConverter(xLen, csrAddrBits))
connectTilelinkNasti(conv.io.nasti, mmio_net.io.out(csrPort))(mmioOutermostTLParams)
io.csr(i) <> conv.io.smi
}
val scrPort = addrHashMap("conf:scr").port - 1
val scr_conv = Module(new SmiIONastiIOConverter(scrDataBits, scrAddrBits))
connectTilelinkNasti(scr_conv.io.nasti, mmio_net.io.out(scrPort))(mmioOutermostTLParams)
io.scr <> scr_conv.io.smi
if (p(UseStreamLoopback)) {
val lo_width = p(StreamLoopbackWidth)
val lo_size = p(StreamLoopbackSize)
val lo_conv = Module(new NastiIOStreamIOConverter(lo_width))
val lo_port = addrHashMap("devices:loopback").port - 1
connectTilelinkNasti(lo_conv.io.nasti, mmio_net.io.out(lo_port))(mmioOutermostTLParams)
lo_conv.io.stream.in <> Queue(lo_conv.io.stream.out, lo_size)
}
val dtPort = addrHashMap("conf:devicetree").port - 1
connectTilelinkNasti(io.deviceTree, mmio_net.io.out(dtPort))(mmioOutermostTLParams)
val mem_channels = Wire(Vec(nMemChannels, new NastiIO))
mem_channels.zip(mem_ic.io.out).foreach { case (ch, out) =>
connectTilelinkNasti(ch, out)(outermostTLParams)
}
// Create a SerDes for backup memory port
if(p(UseBackupMemoryPort)) {
VLSIUtils.doOuterMemorySystemSerdes(
mem_channels, io.mem, io.mem_backup, io.mem_backup_en,
1, htifW, p(CacheBlockOffsetBits))
for (i <- 1 until nMemChannels) { io.mem(i) <> mem_channels(i) }
val mem_request = mem_channels.map(io => io.ar.valid || io.aw.valid).reduce(_ || _)
val config_nchannels = Vec(channelConfigs.map(i => UInt(i)))(io.memory_channel_mux_select)
assert(!mem_request || !io.mem_backup_en || config_nchannels === UInt(1),
"Backup memory port only works when 1 memory channel is enabled")
require(channelConfigs.sortWith(_ < _)(0) == 1,
"Backup memory port requires a single memory port mux config")
} else {
io.mem <> mem_channels
io.mem_backup.req.valid := Bool(false)
}
for ((nasti, tl) <- io.mem zip mem_ic.io.out)
TopUtils.connectTilelinkNasti(nasti, tl)(outermostTLParams)
}

View File

@@ -22,7 +22,6 @@ object TestBenchGeneration extends FileSystemUtilities {
wire [`HTIF_WIDTH-1:0] htif_in_bits;
wire htif_in_ready, htif_out_valid;
wire [`HTIF_WIDTH-1:0] htif_out_bits;
wire htif_out_stats;
wire mem_bk_in_valid;
wire mem_bk_out_valid;
@@ -73,12 +72,6 @@ object TestBenchGeneration extends FileSystemUtilities {
wire htif_out_valid_delay;
wire htif_out_ready_delay;
wire [`HTIF_WIDTH-1:0] htif_out_bits_delay;
wire htif_out_stats_delay;
wire mem_bk_out_ready_delay;
wire mem_bk_in_valid_delay;
wire mem_bk_out_valid_delay;
assign #0.1 htif_in_valid_delay = htif_in_valid;
assign #0.1 htif_in_ready = htif_in_ready_delay;
@@ -87,12 +80,6 @@ object TestBenchGeneration extends FileSystemUtilities {
assign #0.1 htif_out_valid = htif_out_valid_delay;
assign #0.1 htif_out_ready_delay = htif_out_ready;
assign #0.1 htif_out_bits = htif_out_bits_delay;
assign #0.1 htif_out_stats = htif_out_stats_delay;
assign #0.1 mem_bk_out_ready_delay = mem_bk_out_ready;
assign #0.1 mem_bk_in_valid_delay = mem_bk_in_valid;
assign #0.1 mem_bk_out_valid = mem_bk_out_valid_delay;
"""
val nasti_delays = (0 until nMemChannel) map { i => s"""
@@ -216,8 +203,6 @@ object TestBenchGeneration extends FileSystemUtilities {
val instantiation = s"""
`ifdef FPGA
assign mem_bk_out_valid_delay = 1'b0;
assign htif_out_stats_delay = 1'b0;
assign htif_clk = clk;
`endif
@@ -231,25 +216,9 @@ object TestBenchGeneration extends FileSystemUtilities {
`ifndef FPGA
.io_host_clk(htif_clk),
.io_host_clk_edge(),
.io_host_debug_stats_csr(htif_out_stats_delay),
`ifdef MEM_BACKUP_EN
.io_mem_backup_ctrl_en(1'b1),
`else
.io_mem_backup_ctrl_en(1'b0),
`endif // MEM_BACKUP_EN
.io_mem_backup_ctrl_in_valid(mem_bk_in_valid_delay),
.io_mem_backup_ctrl_out_ready(mem_bk_out_ready_delay),
.io_mem_backup_ctrl_out_valid(mem_bk_out_valid_delay),
`else
.io_host_clk (),
.io_host_clk_edge (),
.io_host_debug_stats_csr (),
.io_mem_backup_ctrl_en (1'b0),
.io_mem_backup_ctrl_in_valid (1'b0),
.io_mem_backup_ctrl_out_ready (1'b0),
.io_mem_backup_ctrl_out_valid (),
`endif // FPGA
.io_host_in_valid(htif_in_valid_delay),

View File

@@ -7,14 +7,6 @@ import cde.Parameters
import junctions._
import uncore._
class MemDessert(topParams: Parameters) extends Module {
implicit val p = topParams
val io = new MemDesserIO(p(HtifKey).width)
val x = Module(new MemDesser(p(HtifKey).width))
x.io.narrow <> io.narrow
io.wide <> x.io.wide
}
object VLSIUtils {
def doOuterMemorySystemSerdes(
llcs: Seq[NastiIO],
@@ -65,6 +57,30 @@ object VLSIUtils {
}
}
private def makeHTIFClockDivider(scr: SCRIO, host: HostIO, htifW: Int) = {
val hio = Module((new SlowIO(512)) { Bits(width = htifW) })
hio.io.set_divisor.valid := scr.wen && (scr.waddr === UInt(63))
hio.io.set_divisor.bits := scr.wdata
scr.rdata(63) := hio.io.divisor
scr.allocate(63, "HTIF_IO_CLOCK_DIVISOR")
host.clk := hio.io.clk_slow
host.clk_edge := Reg(next=host.clk && !Reg(next=host.clk))
hio
}
def padOutHTIFWithDividedClock(
htif: HostIO,
scr: SCRIO,
host: HostIO,
htifW: Int) {
val hio = makeHTIFClockDivider(scr, host, htifW)
hio.io.out_fast <> htif.out
host.out <> hio.io.out_slow
hio.io.in_slow <> host.in
htif.in <> hio.io.in_fast
}
def padOutHTIFWithDividedClock(
htif: HostIO,
scr: SCRIO,
@@ -72,11 +88,7 @@ object VLSIUtils {
parent: MemBackupCtrlIO,
host: HostIO,
htifW: Int) {
val hio = Module((new SlowIO(512)) { Bits(width = htifW+1) })
hio.io.set_divisor.valid := scr.wen && (scr.waddr === UInt(63))
hio.io.set_divisor.bits := scr.wdata
scr.rdata(63) := hio.io.divisor
scr.allocate(63, "HTIF_IO_CLOCK_DIVISOR")
val hio = makeHTIFClockDivider(scr, host, htifW+1)
hio.io.out_fast.valid := htif.out.valid || child.req.valid
hio.io.out_fast.bits := Cat(htif.out.valid, Mux(htif.out.valid, htif.out.bits, child.req.bits))
@@ -96,7 +108,5 @@ object VLSIUtils {
htif.in.valid := hio.io.in_fast.valid && !hio.io.in_fast.bits(htifW)
htif.in.bits := hio.io.in_fast.bits
hio.io.in_fast.ready := Mux(hio.io.in_fast.bits(htifW), Bool(true), htif.in.ready)
host.clk := hio.io.clk_slow
host.clk_edge := Reg(next=host.clk && !Reg(next=host.clk))
}
}