From 2225a6d5b4977ddf6b46b139723a565aaf81e484 Mon Sep 17 00:00:00 2001 From: Henry Cook Date: Tue, 28 Jul 2015 15:52:07 -0700 Subject: [PATCH 001/116] Initial commit --- junctions/.gitignore | 17 +++++++++++++++++ junctions/LICENSE | 28 ++++++++++++++++++++++++++++ junctions/README.md | 2 ++ 3 files changed, 47 insertions(+) create mode 100644 junctions/.gitignore create mode 100644 junctions/LICENSE create mode 100644 junctions/README.md diff --git a/junctions/.gitignore b/junctions/.gitignore new file mode 100644 index 00000000..c58d83b3 --- /dev/null +++ b/junctions/.gitignore @@ -0,0 +1,17 @@ +*.class +*.log + +# sbt specific +.cache +.history +.lib/ +dist/* +target/ +lib_managed/ +src_managed/ +project/boot/ +project/plugins/project/ + +# Scala-IDE specific +.scala_dependencies +.worksheet diff --git a/junctions/LICENSE b/junctions/LICENSE new file mode 100644 index 00000000..f4d4317a --- /dev/null +++ b/junctions/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2015, UC Berkeley Architecture Research +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of junctions nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/junctions/README.md b/junctions/README.md new file mode 100644 index 00000000..e200e4c0 --- /dev/null +++ b/junctions/README.md @@ -0,0 +1,2 @@ +# junctions +A repository for peripheral components and IO devices associated with the RocketChip project From 8eb20cde440e1a34d05eb4eacbc40999a73d17d6 Mon Sep 17 00:00:00 2001 From: Henry Cook Date: Tue, 28 Jul 2015 16:07:30 -0700 Subject: [PATCH 002/116] Update LICENSE --- junctions/LICENSE | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/junctions/LICENSE b/junctions/LICENSE index f4d4317a..b226e9d5 100644 --- a/junctions/LICENSE +++ b/junctions/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2015, UC Berkeley Architecture Research +Copyright (c) 2015, The Regents of the University of California (Regents) All rights reserved. Redistribution and use in source and binary forms, with or without @@ -15,14 +15,14 @@ modification, are permitted provided that the following conditions are met: contributors may be used to endorse or promote products derived from this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, +SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING +OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS +BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED +HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE +MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. From 6a44cd43fd8b313639ff5646b24d91926ad35d67 Mon Sep 17 00:00:00 2001 From: Henry Cook Date: Tue, 28 Jul 2015 16:20:18 -0700 Subject: [PATCH 003/116] Update README.md --- junctions/README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/junctions/README.md b/junctions/README.md index e200e4c0..f88be666 100644 --- a/junctions/README.md +++ b/junctions/README.md @@ -1,2 +1,6 @@ # junctions -A repository for peripheral components and IO devices associated with the RocketChip project +A repository for peripheral components and IO devices associated with the RocketChip project. + +To uses these modules, include this repo as a git submodule within the your chip repository and add it as Project in your chip's build.scala. These components are only dependent on Chisel, i.e. 
+ + lazy val junctions = project.dependsOn(chisel) From c27945c0946212caaeb0a52acb99629620cf4613 Mon Sep 17 00:00:00 2001 From: Henry Cook Date: Wed, 29 Jul 2015 18:02:58 -0700 Subject: [PATCH 004/116] source and build files. source code pulled from uncore and zscale repos --- junctions/build.sbt | 19 ++ junctions/project/plugins.sbt | 5 + junctions/src/main/scala/hasti.scala | 250 +++++++++++++++ junctions/src/main/scala/memserdes.scala | 374 +++++++++++++++++++++++ junctions/src/main/scala/nasti.scala | 141 +++++++++ junctions/src/main/scala/package.scala | 1 + junctions/src/main/scala/poci.scala | 88 ++++++ junctions/src/main/scala/slowio.scala | 70 +++++ 8 files changed, 948 insertions(+) create mode 100644 junctions/build.sbt create mode 100644 junctions/project/plugins.sbt create mode 100644 junctions/src/main/scala/hasti.scala create mode 100644 junctions/src/main/scala/memserdes.scala create mode 100644 junctions/src/main/scala/nasti.scala create mode 100644 junctions/src/main/scala/package.scala create mode 100644 junctions/src/main/scala/poci.scala create mode 100644 junctions/src/main/scala/slowio.scala diff --git a/junctions/build.sbt b/junctions/build.sbt new file mode 100644 index 00000000..0bb8c47d --- /dev/null +++ b/junctions/build.sbt @@ -0,0 +1,19 @@ +organization := "edu.berkeley.cs" + +version := "1.0" + +name := "junctions" + +scalaVersion := "2.10.2" + +// Provide a managed dependency on chisel if -DchiselVersion="" is supplied on the command line. 
+libraryDependencies ++= (Seq("chisel").map { + dep: String => sys.props.get(dep + "Version") map { "edu.berkeley.cs" %% dep % _ }}).flatten + +site.settings + +site.includeScaladoc() + +ghpages.settings + +git.remoteRepo := "git@github.com:ucb-bar/junctions.git" diff --git a/junctions/project/plugins.sbt b/junctions/project/plugins.sbt new file mode 100644 index 00000000..4f4825c4 --- /dev/null +++ b/junctions/project/plugins.sbt @@ -0,0 +1,5 @@ +resolvers += "jgit-repo" at "http://download.eclipse.org/jgit/maven" + +addSbtPlugin("com.typesafe.sbt" % "sbt-ghpages" % "0.5.3") + +addSbtPlugin("com.typesafe.sbt" % "sbt-site" % "0.8.1") diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala new file mode 100644 index 00000000..7d8078c5 --- /dev/null +++ b/junctions/src/main/scala/hasti.scala @@ -0,0 +1,250 @@ +package junctions + +import Chisel._ + +abstract trait HASTIConstants +{ + val SZ_HTRANS = 2 + val HTRANS_IDLE = UInt(0, SZ_HTRANS) + val HTRANS_BUSY = UInt(1, SZ_HTRANS) + val HTRANS_NONSEQ = UInt(2, SZ_HTRANS) + val HTRANS_SEQ = UInt(3, SZ_HTRANS) + + val SZ_HBURST = 3 + val HBURST_SINGLE = UInt(0, SZ_HBURST) + val HBURST_INCR = UInt(1, SZ_HBURST) + val HBURST_WRAP4 = UInt(2, SZ_HBURST) + val HBURST_INCR4 = UInt(3, SZ_HBURST) + val HBURST_WRAP8 = UInt(4, SZ_HBURST) + val HBURST_INCR8 = UInt(5, SZ_HBURST) + val HBURST_WRAP16 = UInt(6, SZ_HBURST) + val HBURST_INCR16 = UInt(7, SZ_HBURST) + + val SZ_HRESP = 1 + val HRESP_OKAY = UInt(0, SZ_HRESP) + val HRESP_ERROR = UInt(1, SZ_HRESP) + + val SZ_HSIZE = 3 + val SZ_HPROT = 4 + + // TODO: Parameterize + val SZ_HADDR = 32 + val SZ_HDATA = 32 + + def dgate(valid: Bool, b: Bits) = Fill(b.getWidth, valid) & b +} + +class HASTIMasterIO extends Bundle +{ + val haddr = UInt(OUTPUT, SZ_HADDR) + val hwrite = Bool(OUTPUT) + val hsize = UInt(OUTPUT, SZ_HSIZE) + val hburst = UInt(OUTPUT, SZ_HBURST) + val hprot = UInt(OUTPUT, SZ_HPROT) + val htrans = UInt(OUTPUT, SZ_HTRANS) + val hmastlock = 
Bool(OUTPUT) + + val hwdata = Bits(OUTPUT, SZ_HDATA) + val hrdata = Bits(INPUT, SZ_HDATA) + + val hready = Bool(INPUT) + val hresp = UInt(INPUT, SZ_HRESP) +} + +class HASTISlaveIO extends Bundle +{ + val haddr = UInt(INPUT, SZ_HADDR) + val hwrite = Bool(INPUT) + val hsize = UInt(INPUT, SZ_HSIZE) + val hburst = UInt(INPUT, SZ_HBURST) + val hprot = UInt(INPUT, SZ_HPROT) + val htrans = UInt(INPUT, SZ_HTRANS) + val hmastlock = Bool(INPUT) + + val hwdata = Bits(INPUT, SZ_HDATA) + val hrdata = Bits(OUTPUT, SZ_HDATA) + + val hsel = Bool(INPUT) + val hreadyin = Bool(INPUT) + val hreadyout = Bool(OUTPUT) + val hresp = UInt(OUTPUT, SZ_HRESP) +} + +class HASTIBus(amap: Seq[UInt=>Bool]) extends Module +{ + val io = new Bundle { + val master = new HASTIMasterIO().flip + val slaves = Vec.fill(amap.size){new HASTISlaveIO}.flip + } + + // skid buffer + val skb_valid = Reg(init = Bool(false)) + val skb_haddr = Reg(UInt(width = SZ_HADDR)) + val skb_hwrite = Reg(Bool()) + val skb_hsize = Reg(UInt(width = SZ_HSIZE)) + val skb_hburst = Reg(UInt(width = SZ_HBURST)) + val skb_hprot = Reg(UInt(width = SZ_HPROT)) + val skb_htrans = Reg(UInt(width = SZ_HTRANS)) + val skb_hmastlock = Reg(Bool()) + val skb_hwdata = Reg(UInt(width = SZ_HDATA)) + + val master_haddr = Mux(skb_valid, skb_haddr, io.master.haddr) + val master_hwrite = Mux(skb_valid, skb_hwrite, io.master.hwrite) + val master_hsize = Mux(skb_valid, skb_hsize, io.master.hsize) + val master_hburst = Mux(skb_valid, skb_hburst, io.master.hburst) + val master_hprot = Mux(skb_valid, skb_hprot, io.master.hprot) + val master_htrans = Mux(skb_valid, skb_htrans, io.master.htrans) + val master_hmastlock = Mux(skb_valid, skb_hmastlock, io.master.hmastlock) + val master_hwdata = Mux(skb_valid, skb_hwdata, io.master.hwdata) + + val hsels = PriorityEncoderOH( + (io.slaves zip amap) map { case (s, afn) => { + s.haddr := master_haddr + s.hwrite := master_hwrite + s.hsize := master_hsize + s.hburst := master_hburst + s.hprot := master_hprot + 
s.htrans := master_htrans + s.hmastlock := master_hmastlock + s.hwdata := master_hwdata + afn(master_haddr) && master_htrans.orR + }}) + + (io.slaves zip hsels) foreach { case (s, hsel) => { + s.hsel := hsel + s.hreadyin := skb_valid || io.master.hready + } } + + val s1_hsels = Vec.fill(amap.size){Reg(init = Bool(false))} + val hreadyouts = io.slaves.map(_.hreadyout) + val master_hready = s1_hsels.reduce(_||_) === Bool(false) || Mux1H(s1_hsels, hreadyouts) + + when (master_hready) { + val skid = s1_hsels.reduce(_||_) && (hsels zip hreadyouts).map{ case (s, r) => s && !r }.reduce(_||_) + skb_valid := skid + when (skid) { + skb_haddr := io.master.haddr + skb_hwrite := io.master.hwrite + skb_hsize := io.master.hsize + skb_hburst := io.master.hburst + skb_hprot := io.master.hprot + skb_htrans := io.master.htrans + skb_hmastlock := io.master.hmastlock + } + + (s1_hsels zip hsels) foreach { case (s1, s) => + s1 := s + } + } + + io.master.hready := !skb_valid && master_hready + io.master.hrdata := Mux1H(s1_hsels, io.slaves.map(_.hrdata)) + io.master.hresp := Mux1H(s1_hsels, io.slaves.map(_.hresp)) +} + +class HASTISlaveMux(n: Int) extends Module +{ + val io = new Bundle { + val ins = Vec.fill(n){new HASTISlaveIO} + val out = new HASTISlaveIO().flip + } + + // skid buffers + val skb_valid = Vec.fill(n){Reg(init = Bool(false))} + val skb_haddr = Vec.fill(n){Reg(UInt(width = SZ_HADDR))} + val skb_hwrite = Vec.fill(n){Reg(Bool())} + val skb_hsize = Vec.fill(n){Reg(UInt(width = SZ_HSIZE))} + val skb_hburst = Vec.fill(n){Reg(UInt(width = SZ_HBURST))} + val skb_hprot = Vec.fill(n){Reg(UInt(width = SZ_HPROT))} + val skb_htrans = Vec.fill(n){Reg(UInt(width = SZ_HTRANS))} + val skb_hmastlock = Vec.fill(n){Reg(Bool())} + + val requests = (io.ins zip skb_valid) map { case (in, v) => in.hsel && in.hreadyin || v } + val grants = PriorityEncoderOH(requests) + + val s1_grants = Vec.fill(n){Reg(init = Bool(true))} + + (s1_grants zip grants) foreach { case (g1, g) => + when 
(io.out.hreadyout) { g1 := g } + } + + def sel[T <: Data](in: Vec[T], s1: Vec[T]) = + Vec((skb_valid zip s1 zip in) map { case ((v, s), in) => Mux(v, s, in) }) + + io.out.haddr := Mux1H(grants, sel(Vec(io.ins.map(_.haddr)), skb_haddr)) + io.out.hwrite := Mux1H(grants, sel(Vec(io.ins.map(_.hwrite)), skb_hwrite)) + io.out.hsize := Mux1H(grants, sel(Vec(io.ins.map(_.hsize)), skb_hsize)) + io.out.hburst := Mux1H(grants, sel(Vec(io.ins.map(_.hburst)), skb_hburst)) + io.out.hprot := Mux1H(grants, sel(Vec(io.ins.map(_.hprot)), skb_hprot)) + io.out.htrans := Mux1H(grants, sel(Vec(io.ins.map(_.htrans)), skb_htrans)) + io.out.hmastlock := Mux1H(grants, sel(Vec(io.ins.map(_.hmastlock)), skb_hmastlock)) + io.out.hsel := grants.reduce(_||_) + + (io.ins zipWithIndex) map { case (in, i) => { + when (io.out.hreadyout) { + when (grants(i)) { + skb_valid(i) := Bool(false) + } + when (!grants(i) && !skb_valid(i)) { + val valid = in.hsel && in.hreadyin + skb_valid(i) := valid + when (valid) { // clock-gate + skb_haddr(i) := in.haddr + skb_hwrite(i) := in.hwrite + skb_hsize(i) := in.hsize + skb_hburst(i) := in.hburst + skb_hprot(i) := in.hprot + skb_htrans(i) := in.htrans + skb_hmastlock(i) := in.hmastlock + } + } + } + } } + + io.out.hwdata := Mux1H(s1_grants, io.ins.map(_.hwdata)) + io.out.hreadyin := io.out.hreadyout + + (io.ins zipWithIndex) foreach { case (in, i) => { + val g1 = s1_grants(i) + in.hrdata := dgate(g1, io.out.hrdata) + in.hreadyout := io.out.hreadyout && (!skb_valid(i) || g1) + in.hresp := dgate(g1, io.out.hresp) + } } +} + +class HASTIXbar(nMasters: Int, addressMap: Seq[UInt=>Bool]) extends Module +{ + val io = new Bundle { + val masters = Vec.fill(nMasters){new HASTIMasterIO}.flip + val slaves = Vec.fill(addressMap.size){new HASTISlaveIO}.flip + } + + val buses = List.fill(nMasters){Module(new HASTIBus(addressMap))} + val muxes = List.fill(addressMap.size){Module(new HASTISlaveMux(nMasters))} + + (buses.map(b => b.io.master) zip io.masters) foreach { case (b, m) => 
b <> m } + (muxes.map(m => m.io.out) zip io.slaves ) foreach { case (x, s) => x <> s } + for (m <- 0 until nMasters; s <- 0 until addressMap.size) yield { + buses(m).io.slaves(s) <> muxes(s).io.ins(m) + } +} + +class HASTISlaveToMaster extends Module +{ + val io = new Bundle { + val in = new HASTISlaveIO + val out = new HASTIMasterIO + } + + io.out.haddr := io.in.haddr + io.out.hwrite := io.in.hwrite + io.out.hsize := io.in.hsize + io.out.hburst := io.in.hburst + io.out.hprot := io.in.hprot + io.out.htrans := Mux(io.in.hsel && io.in.hreadyin, io.in.htrans, HTRANS_IDLE) + io.out.hmastlock := io.in.hmastlock + io.out.hwdata := io.in.hwdata + io.in.hrdata := io.out.hrdata + io.in.hreadyout := io.out.hready + io.in.hresp := io.out.hresp +} diff --git a/junctions/src/main/scala/memserdes.scala b/junctions/src/main/scala/memserdes.scala new file mode 100644 index 00000000..1fd72044 --- /dev/null +++ b/junctions/src/main/scala/memserdes.scala @@ -0,0 +1,374 @@ +// See LICENSE for license details. 
+ +package junctions +import Chisel._ +import scala.math._ + +case object PAddrBits extends Field[Int] +case object VAddrBits extends Field[Int] +case object PgIdxBits extends Field[Int] +case object PgLevels extends Field[Int] +case object PgLevelBits extends Field[Int] +case object ASIdBits extends Field[Int] +case object PPNBits extends Field[Int] +case object VPNBits extends Field[Int] + +case object MIFAddrBits extends Field[Int] +case object MIFDataBits extends Field[Int] +case object MIFTagBits extends Field[Int] +case object MIFDataBeats extends Field[Int] + +trait MIFParameters extends UsesParameters { + val mifTagBits = params(MIFTagBits) + val mifAddrBits = params(MIFAddrBits) + val mifDataBits = params(MIFDataBits) + val mifDataBeats = params(MIFDataBeats) +} + +abstract class MIFBundle extends Bundle with MIFParameters +abstract class MIFModule extends Module with MIFParameters + +trait HasMemData extends MIFBundle { + val data = Bits(width = mifDataBits) +} + +trait HasMemAddr extends MIFBundle { + val addr = UInt(width = mifAddrBits) +} + +trait HasMemTag extends MIFBundle { + val tag = UInt(width = mifTagBits) +} + +class MemReqCmd extends HasMemAddr with HasMemTag { + val rw = Bool() +} + +class MemTag extends HasMemTag +class MemData extends HasMemData +class MemResp extends HasMemData with HasMemTag + +class MemIO extends Bundle { + val req_cmd = Decoupled(new MemReqCmd) + val req_data = Decoupled(new MemData) + val resp = Decoupled(new MemResp).flip +} + +class MemPipeIO extends Bundle { + val req_cmd = Decoupled(new MemReqCmd) + val req_data = Decoupled(new MemData) + val resp = Valid(new MemResp).flip +} + +class MemSerializedIO(w: Int) extends Bundle { + val req = Decoupled(Bits(width = w)) + val resp = Valid(Bits(width = w)).flip +} + +class MemSerdes(w: Int) extends MIFModule +{ + val io = new Bundle { + val wide = new MemIO().flip + val narrow = new MemSerializedIO(w) + } + val abits = io.wide.req_cmd.bits.toBits.getWidth + val dbits = 
io.wide.req_data.bits.toBits.getWidth + val rbits = io.wide.resp.bits.getWidth + + val out_buf = Reg(Bits()) + val in_buf = Reg(Bits()) + + val s_idle :: s_read_addr :: s_write_addr :: s_write_idle :: s_write_data :: Nil = Enum(UInt(), 5) + val state = Reg(init=s_idle) + val send_cnt = Reg(init=UInt(0, log2Up((max(abits, dbits)+w-1)/w))) + val data_send_cnt = Reg(init=UInt(0, log2Up(mifDataBeats))) + val adone = io.narrow.req.ready && send_cnt === UInt((abits-1)/w) + val ddone = io.narrow.req.ready && send_cnt === UInt((dbits-1)/w) + + when (io.narrow.req.valid && io.narrow.req.ready) { + send_cnt := send_cnt + UInt(1) + out_buf := out_buf >> UInt(w) + } + when (io.wide.req_cmd.valid && io.wide.req_cmd.ready) { + out_buf := io.wide.req_cmd.bits.toBits + } + when (io.wide.req_data.valid && io.wide.req_data.ready) { + out_buf := io.wide.req_data.bits.toBits + } + + io.wide.req_cmd.ready := state === s_idle + io.wide.req_data.ready := state === s_write_idle + io.narrow.req.valid := state === s_read_addr || state === s_write_addr || state === s_write_data + io.narrow.req.bits := out_buf + + when (state === s_idle && io.wide.req_cmd.valid) { + state := Mux(io.wide.req_cmd.bits.rw, s_write_addr, s_read_addr) + } + when (state === s_read_addr && adone) { + state := s_idle + send_cnt := UInt(0) + } + when (state === s_write_addr && adone) { + state := s_write_idle + send_cnt := UInt(0) + } + when (state === s_write_idle && io.wide.req_data.valid) { + state := s_write_data + } + when (state === s_write_data && ddone) { + data_send_cnt := data_send_cnt + UInt(1) + state := Mux(data_send_cnt === UInt(mifDataBeats-1), s_idle, s_write_idle) + send_cnt := UInt(0) + } + + val recv_cnt = Reg(init=UInt(0, log2Up((rbits+w-1)/w))) + val data_recv_cnt = Reg(init=UInt(0, log2Up(mifDataBeats))) + val resp_val = Reg(init=Bool(false)) + + resp_val := Bool(false) + when (io.narrow.resp.valid) { + recv_cnt := recv_cnt + UInt(1) + when (recv_cnt === UInt((rbits-1)/w)) { + recv_cnt := UInt(0) 
+ data_recv_cnt := data_recv_cnt + UInt(1) + resp_val := Bool(true) + } + in_buf := Cat(io.narrow.resp.bits, in_buf((rbits+w-1)/w*w-1,w)) + } + + io.wide.resp.valid := resp_val + io.wide.resp.bits := io.wide.resp.bits.fromBits(in_buf) +} + +class MemDesserIO(w: Int) extends Bundle { + val narrow = new MemSerializedIO(w).flip + val wide = new MemIO +} + +class MemDesser(w: Int) extends Module // test rig side +{ + val io = new MemDesserIO(w) + val abits = io.wide.req_cmd.bits.toBits.getWidth + val dbits = io.wide.req_data.bits.toBits.getWidth + val rbits = io.wide.resp.bits.getWidth + val mifDataBeats = params(MIFDataBeats) + + require(dbits >= abits && rbits >= dbits) + val recv_cnt = Reg(init=UInt(0, log2Up((rbits+w-1)/w))) + val data_recv_cnt = Reg(init=UInt(0, log2Up(mifDataBeats))) + val adone = io.narrow.req.valid && recv_cnt === UInt((abits-1)/w) + val ddone = io.narrow.req.valid && recv_cnt === UInt((dbits-1)/w) + val rdone = io.narrow.resp.valid && recv_cnt === UInt((rbits-1)/w) + + val s_cmd_recv :: s_cmd :: s_data_recv :: s_data :: s_reply :: Nil = Enum(UInt(), 5) + val state = Reg(init=s_cmd_recv) + + val in_buf = Reg(Bits()) + when (io.narrow.req.valid && io.narrow.req.ready || io.narrow.resp.valid) { + recv_cnt := recv_cnt + UInt(1) + in_buf := Cat(io.narrow.req.bits, in_buf((rbits+w-1)/w*w-1,w)) + } + io.narrow.req.ready := state === s_cmd_recv || state === s_data_recv + + when (state === s_cmd_recv && adone) { + state := s_cmd + recv_cnt := UInt(0) + } + when (state === s_cmd && io.wide.req_cmd.ready) { + state := Mux(io.wide.req_cmd.bits.rw, s_data_recv, s_reply) + } + when (state === s_data_recv && ddone) { + state := s_data + recv_cnt := UInt(0) + } + when (state === s_data && io.wide.req_data.ready) { + state := s_data_recv + when (data_recv_cnt === UInt(mifDataBeats-1)) { + state := s_cmd_recv + } + data_recv_cnt := data_recv_cnt + UInt(1) + } + when (rdone) { // state === s_reply + when (data_recv_cnt === UInt(mifDataBeats-1)) { + state := 
s_cmd_recv + } + recv_cnt := UInt(0) + data_recv_cnt := data_recv_cnt + UInt(1) + } + + val req_cmd = in_buf >> UInt(((rbits+w-1)/w - (abits+w-1)/w)*w) + io.wide.req_cmd.valid := state === s_cmd + io.wide.req_cmd.bits := io.wide.req_cmd.bits.fromBits(req_cmd) + + io.wide.req_data.valid := state === s_data + io.wide.req_data.bits.data := in_buf >> UInt(((rbits+w-1)/w - (dbits+w-1)/w)*w) + + val dataq = Module(new Queue(new MemResp, mifDataBeats)) + dataq.io.enq <> io.wide.resp + dataq.io.deq.ready := recv_cnt === UInt((rbits-1)/w) + + io.narrow.resp.valid := dataq.io.deq.valid + io.narrow.resp.bits := dataq.io.deq.bits.toBits >> (recv_cnt * UInt(w)) +} + +class HellaFlowQueue[T <: Data](val entries: Int)(data: => T) extends Module +{ + val io = new QueueIO(data, entries) + require(entries > 1) + + val do_flow = Wire(Bool()) + val do_enq = io.enq.fire() && !do_flow + val do_deq = io.deq.fire() && !do_flow + + val maybe_full = Reg(init=Bool(false)) + val enq_ptr = Counter(do_enq, entries)._1 + val (deq_ptr, deq_done) = Counter(do_deq, entries) + when (do_enq != do_deq) { maybe_full := do_enq } + + val ptr_match = enq_ptr === deq_ptr + val empty = ptr_match && !maybe_full + val full = ptr_match && maybe_full + val atLeastTwo = full || enq_ptr - deq_ptr >= UInt(2) + do_flow := empty && io.deq.ready + + val ram = SeqMem(data, entries) + when (do_enq) { ram.write(enq_ptr, io.enq.bits) } + + val ren = io.deq.ready && (atLeastTwo || !io.deq.valid && !empty) + val raddr = Mux(io.deq.valid, Mux(deq_done, UInt(0), deq_ptr + UInt(1)), deq_ptr) + val ram_out_valid = Reg(next = ren) + + io.deq.valid := Mux(empty, io.enq.valid, ram_out_valid) + io.enq.ready := !full + io.deq.bits := Mux(empty, io.enq.bits, ram.read(raddr, ren)) +} + +class HellaQueue[T <: Data](val entries: Int)(data: => T) extends Module +{ + val io = new QueueIO(data, entries) + + val fq = Module(new HellaFlowQueue(entries)(data)) + io.enq <> fq.io.enq + io.deq <> Queue(fq.io.deq, 1, pipe = true) +} + +object 
HellaQueue +{ + def apply[T <: Data](enq: DecoupledIO[T], entries: Int) = { + val q = Module((new HellaQueue(entries)) { enq.bits }) + q.io.enq.valid := enq.valid // not using <> so that override is allowed + q.io.enq.bits := enq.bits + enq.ready := q.io.enq.ready + q.io.deq + } +} + +class MemIOArbiter(val arbN: Int) extends MIFModule { + val io = new Bundle { + val inner = Vec.fill(arbN){new MemIO}.flip + val outer = new MemIO + } + + if(arbN > 1) { + val cmd_arb = Module(new RRArbiter(new MemReqCmd, arbN)) + val choice_q = Module(new Queue(cmd_arb.io.chosen, 4)) + val (data_cnt, data_done) = Counter(io.outer.req_data.fire(), mifDataBeats) + + io.inner.map(_.req_cmd).zipWithIndex.zip(cmd_arb.io.in).map{ case ((req, id), arb) => { + arb.valid := req.valid + arb.bits := req.bits + arb.bits.tag := Cat(req.bits.tag, UInt(id)) + req.ready := arb.ready + }} + io.outer.req_cmd.bits := cmd_arb.io.out.bits + io.outer.req_cmd.valid := cmd_arb.io.out.valid && choice_q.io.enq.ready + cmd_arb.io.out.ready := io.outer.req_cmd.ready && choice_q.io.enq.ready + choice_q.io.enq.bits := cmd_arb.io.chosen + choice_q.io.enq.valid := cmd_arb.io.out.fire() && cmd_arb.io.out.bits.rw + + io.outer.req_data.bits := io.inner(choice_q.io.deq.bits).req_data.bits + io.outer.req_data.valid := io.inner(choice_q.io.deq.bits).req_data.valid && choice_q.io.deq.valid + io.inner.map(_.req_data.ready).zipWithIndex.foreach { + case(r, i) => r := UInt(i) === choice_q.io.deq.bits && choice_q.io.deq.valid + } + choice_q.io.deq.ready := data_done + + io.outer.resp.ready := Bool(false) + for (i <- 0 until arbN) { + io.inner(i).resp.valid := Bool(false) + when(io.outer.resp.bits.tag(log2Up(arbN)-1,0).toUInt === UInt(i)) { + io.inner(i).resp.valid := io.outer.resp.valid + io.outer.resp.ready := io.inner(i).resp.ready + } + io.inner(i).resp.bits := io.outer.resp.bits + io.inner(i).resp.bits.tag := io.outer.resp.bits.tag >> UInt(log2Up(arbN)) + } + } else { io.inner.head <> io.outer } +} + +object 
MemIOMemPipeIOConverter { + def apply(in: MemPipeIO): MemIO = { + val out = Wire(new MemIO()) + in.resp.valid := out.resp.valid + in.resp.bits := out.resp.bits + out.resp.ready := Bool(true) + out.req_cmd.valid := in.req_cmd.valid + out.req_cmd.bits := in.req_cmd.bits + in.req_cmd.ready := out.req_cmd.ready + out.req_data.valid := in.req_data.valid + out.req_data.bits := in.req_data.bits + in.req_data.ready := out.req_data.ready + out + } +} + +class MemPipeIOMemIOConverter(numRequests: Int) extends MIFModule { + val io = new Bundle { + val cpu = new MemIO().flip + val mem = new MemPipeIO + } + + val numEntries = numRequests * mifDataBeats + val size = log2Down(numEntries) + 1 + + val inc = Wire(Bool()) + val dec = Wire(Bool()) + val count = Reg(init=UInt(numEntries, size)) + val watermark = count >= UInt(mifDataBeats) + + when (inc && !dec) { + count := count + UInt(1) + } + when (!inc && dec) { + count := count - UInt(mifDataBeats) + } + when (inc && dec) { + count := count - UInt(mifDataBeats-1) + } + + val cmdq_mask = io.cpu.req_cmd.bits.rw || watermark + + io.mem.req_cmd.valid := io.cpu.req_cmd.valid && cmdq_mask + io.cpu.req_cmd.ready := io.mem.req_cmd.ready && cmdq_mask + io.mem.req_cmd.bits := io.cpu.req_cmd.bits + + io.mem.req_data <> io.cpu.req_data + + // Have separate queues to allow for different mem implementations + val resp_data_q = Module((new HellaQueue(numEntries)) { new MemData }) + resp_data_q.io.enq.valid := io.mem.resp.valid + resp_data_q.io.enq.bits.data := io.mem.resp.bits.data + + val resp_tag_q = Module((new HellaQueue(numEntries)) { new MemTag }) + resp_tag_q.io.enq.valid := io.mem.resp.valid + resp_tag_q.io.enq.bits.tag := io.mem.resp.bits.tag + + io.cpu.resp.valid := resp_data_q.io.deq.valid && resp_tag_q.io.deq.valid + io.cpu.resp.bits.data := resp_data_q.io.deq.bits.data + io.cpu.resp.bits.tag := resp_tag_q.io.deq.bits.tag + resp_data_q.io.deq.ready := io.cpu.resp.ready + resp_tag_q.io.deq.ready := io.cpu.resp.ready + + inc := 
resp_data_q.io.deq.fire() && resp_tag_q.io.deq.fire() + dec := io.mem.req_cmd.fire() && !io.mem.req_cmd.bits.rw +} diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala new file mode 100644 index 00000000..c66c9526 --- /dev/null +++ b/junctions/src/main/scala/nasti.scala @@ -0,0 +1,141 @@ +// See LICENSE for license details. + +package junctions +import Chisel._ +import scala.math.max + +case object NASTIDataBits extends Field[Int] +case object NASTIAddrBits extends Field[Int] +case object NASTIIdBits extends Field[Int] + +trait NASTIParameters extends UsesParameters { + val nastiXDataBits = params(NASTIDataBits) + val nastiWStrobeBits = nastiXDataBits / 8 + val nastiXAddrBits = params(NASTIAddrBits) + val nastiWIdBits = params(NASTIIdBits) + val nastiRIdBits = params(NASTIIdBits) + val nastiXIdBits = max(nastiWIdBits, nastiRIdBits) + val nastiXUserBits = 1 + val nastiAWUserBits = nastiXUserBits + val nastiWUserBits = nastiXUserBits + val nastiBUserBits = nastiXUserBits + val nastiARUserBits = nastiXUserBits + val nastiRUserBits = nastiXUserBits + val nastiXLenBits = 8 + val nastiXSizeBits = 3 + val nastiXBurstBits = 2 + val nastiXCacheBits = 4 + val nastiXProtBits = 3 + val nastiXQosBits = 4 + val nastiXRegionBits = 4 + val nastiXRespBits = 2 + + def bytesToXSize(bytes: UInt) = MuxLookup(bytes, UInt("b111"), Array( + UInt(1) -> UInt(0), + UInt(2) -> UInt(1), + UInt(4) -> UInt(2), + UInt(8) -> UInt(3), + UInt(16) -> UInt(4), + UInt(32) -> UInt(5), + UInt(64) -> UInt(6), + UInt(128) -> UInt(7))) +} + +abstract class NASTIBundle extends Bundle with NASTIParameters +abstract class NASTIModule extends Module with NASTIParameters + +trait NASTIChannel extends NASTIBundle +trait NASTIMasterToSlaveChannel extends NASTIChannel +trait NASTISlaveToMasterChannel extends NASTIChannel + +class NASTIMasterIO extends Bundle { + val aw = Decoupled(new NASTIWriteAddressChannel) + val w = Decoupled(new NASTIWriteDataChannel) + val b = Decoupled(new 
NASTIWriteResponseChannel).flip + val ar = Decoupled(new NASTIReadAddressChannel) + val r = Decoupled(new NASTIReadDataChannel).flip +} + +class NASTISlaveIO extends NASTIMasterIO { flip() } + +trait HasNASTIMetadata extends NASTIBundle { + val addr = UInt(width = nastiXAddrBits) + val len = UInt(width = nastiXLenBits) + val size = UInt(width = nastiXSizeBits) + val burst = UInt(width = nastiXBurstBits) + val lock = Bool() + val cache = UInt(width = nastiXCacheBits) + val prot = UInt(width = nastiXProtBits) + val qos = UInt(width = nastiXQosBits) + val region = UInt(width = nastiXRegionBits) +} + +trait HasNASTIData extends NASTIBundle { + val data = UInt(width = nastiXDataBits) + val last = Bool() +} + +class NASTIAddressChannel extends NASTIMasterToSlaveChannel with HasNASTIMetadata + +class NASTIResponseChannel extends NASTISlaveToMasterChannel { + val resp = UInt(width = nastiXRespBits) +} + +class NASTIWriteAddressChannel extends NASTIAddressChannel { + val id = UInt(width = nastiWIdBits) + val user = UInt(width = nastiAWUserBits) +} + +class NASTIWriteDataChannel extends NASTIMasterToSlaveChannel with HasNASTIData { + val strb = UInt(width = nastiWStrobeBits) + val user = UInt(width = nastiWUserBits) +} + +class NASTIWriteResponseChannel extends NASTIResponseChannel { + val id = UInt(width = nastiWIdBits) + val user = UInt(width = nastiBUserBits) +} + +class NASTIReadAddressChannel extends NASTIAddressChannel { + val id = UInt(width = nastiRIdBits) + val user = UInt(width = nastiARUserBits) +} + +class NASTIReadDataChannel extends NASTIResponseChannel with HasNASTIData { + val id = UInt(width = nastiRIdBits) + val user = UInt(width = nastiRUserBits) +} + +class MemIONASTISlaveIOConverter(cacheBlockOffsetBits: Int) extends MIFModule with NASTIParameters { + val io = new Bundle { + val nasti = new NASTISlaveIO + val mem = new MemIO + } + + require(mifDataBits == nastiXDataBits, "Data sizes between LLC and MC don't agree") + val (mif_cnt_out, mif_wrap_out) = 
Counter(io.mem.resp.fire(), mifDataBeats) + + io.mem.req_cmd.bits.addr := Mux(io.nasti.aw.valid, io.nasti.aw.bits.addr, io.nasti.ar.bits.addr) >> + UInt(cacheBlockOffsetBits) + io.mem.req_cmd.bits.tag := Mux(io.nasti.aw.valid, io.nasti.aw.bits.id, io.nasti.ar.bits.id) + io.mem.req_cmd.bits.rw := io.nasti.aw.valid + io.mem.req_cmd.valid := (io.nasti.aw.valid && io.nasti.b.ready) || io.nasti.ar.valid + io.nasti.ar.ready := io.mem.req_cmd.ready && !io.nasti.aw.valid + io.nasti.aw.ready := io.mem.req_cmd.ready && io.nasti.b.ready + + io.nasti.b.valid := io.nasti.aw.valid && io.mem.req_cmd.ready + io.nasti.b.bits.id := io.nasti.aw.bits.id + io.nasti.b.bits.resp := UInt(0) + + io.nasti.w.ready := io.mem.req_data.ready + io.mem.req_data.valid := io.nasti.w.valid + io.mem.req_data.bits.data := io.nasti.w.bits.data + assert(!io.nasti.w.valid || io.nasti.w.bits.strb.andR, "MemIO must write full cache line") + + io.nasti.r.valid := io.mem.resp.valid + io.nasti.r.bits.data := io.mem.resp.bits.data + io.nasti.r.bits.last := mif_wrap_out + io.nasti.r.bits.id := io.mem.resp.bits.tag + io.nasti.r.bits.resp := UInt(0) + io.mem.resp.ready := io.nasti.r.ready +} diff --git a/junctions/src/main/scala/package.scala b/junctions/src/main/scala/package.scala new file mode 100644 index 00000000..deb7549d --- /dev/null +++ b/junctions/src/main/scala/package.scala @@ -0,0 +1 @@ +package object junctions extends HASTIConstants with POCIConstants diff --git a/junctions/src/main/scala/poci.scala b/junctions/src/main/scala/poci.scala new file mode 100644 index 00000000..f6eaece2 --- /dev/null +++ b/junctions/src/main/scala/poci.scala @@ -0,0 +1,88 @@ +package junctions + +import Chisel._ + +abstract trait POCIConstants +{ + val SZ_PADDR = 32 + val SZ_PDATA = 32 +} + +class POCIIO extends Bundle +{ + val paddr = UInt(OUTPUT, SZ_PADDR) + val pwrite = Bool(OUTPUT) + val psel = Bool(OUTPUT) + val penable = Bool(OUTPUT) + val pwdata = UInt(OUTPUT, SZ_PDATA) + val prdata = UInt(INPUT, SZ_PDATA) + val 
pready = Bool(INPUT) + val pslverr = Bool(INPUT) +} + +class HASTItoPOCIBridge extends Module +{ + val io = new Bundle { + val in = new HASTISlaveIO + val out = new POCIIO + } + + val s_idle :: s_setup :: s_access :: Nil = Enum(UInt(), 3) + val state = Reg(init = s_idle) + val transfer = io.in.hsel & io.in.hreadyin & io.in.htrans(1) + + switch (state) { + is (s_idle) { + when (transfer) { state := s_setup } + } + is (s_setup) { + state := s_access + } + is (s_access) { + when (io.out.pready & ~transfer) { state := s_idle } + when (io.out.pready & transfer) { state := s_setup } + when (~io.out.pready) { state := s_access } + } + } + + val haddr_reg = Reg(UInt(width = SZ_PADDR)) + val hwrite_reg = Reg(UInt(width = 1)) + when (transfer) { + haddr_reg := io.in.haddr + hwrite_reg := io.in.hwrite + } + + io.out.paddr := haddr_reg + io.out.pwrite := hwrite_reg(0) + io.out.psel := (state != s_idle) + io.out.penable := (state === s_access) + io.out.pwdata := io.in.hwdata + io.in.hrdata := io.out.prdata + io.in.hreadyout := ((state === s_access) & io.out.pready) | (state === s_idle) + io.in.hresp := io.out.pslverr +} + +class POCIBus(amap: Seq[UInt=>Bool]) extends Module +{ + val io = new Bundle { + val master = new POCIIO().flip + val slaves = Vec.fill(amap.size){new POCIIO} + } + + val psels = PriorityEncoderOH( + (io.slaves zip amap) map { case (s, afn) => { + s.paddr := io.master.paddr + s.pwrite := io.master.pwrite + s.pwdata := io.master.pwdata + afn(io.master.paddr) && io.master.psel + }}) + + (io.slaves zip psels) foreach { case (s, psel) => { + s.psel := psel + s.penable := io.master.penable && psel + } } + + io.master.prdata := Mux1H(psels, io.slaves.map(_.prdata)) + io.master.pready := Mux1H(psels, io.slaves.map(_.pready)) + io.master.pslverr := Mux1H(psels, io.slaves.map(_.pslverr)) +} diff --git a/junctions/src/main/scala/slowio.scala b/junctions/src/main/scala/slowio.scala new file mode 100644 index 00000000..b7a12226 --- /dev/null +++ 
b/junctions/src/main/scala/slowio.scala @@ -0,0 +1,70 @@ +// See LICENSE for license details. + +package junctions +import Chisel._ + +class SlowIO[T <: Data](val divisor_max: Int)(data: => T) extends Module +{ + val io = new Bundle { + val out_fast = Decoupled(data).flip + val out_slow = Decoupled(data) + val in_fast = Decoupled(data) + val in_slow = Decoupled(data).flip + val clk_slow = Bool(OUTPUT) + val set_divisor = Valid(Bits(width = 32)).flip + val divisor = Bits(OUTPUT, 32) + } + + require(divisor_max >= 8 && divisor_max <= 65536 && isPow2(divisor_max)) + val divisor = Reg(init=UInt(divisor_max-1)) + val d_shadow = Reg(init=UInt(divisor_max-1)) + val hold = Reg(init=UInt(divisor_max/4-1)) + val h_shadow = Reg(init=UInt(divisor_max/4-1)) + when (io.set_divisor.valid) { + d_shadow := io.set_divisor.bits(log2Up(divisor_max)-1, 0).toUInt + h_shadow := io.set_divisor.bits(log2Up(divisor_max)-1+16, 16).toUInt + } + io.divisor := hold << UInt(16) | divisor + + val count = Reg{UInt(width = log2Up(divisor_max))} + val myclock = Reg{Bool()} + count := count + UInt(1) + + val rising = count === (divisor >> UInt(1)) + val falling = count === divisor + val held = count === (divisor >> UInt(1)) + hold + + when (falling) { + divisor := d_shadow + hold := h_shadow + count := UInt(0) + myclock := Bool(false) + } + when (rising) { + myclock := Bool(true) + } + + val in_slow_rdy = Reg(init=Bool(false)) + val out_slow_val = Reg(init=Bool(false)) + val out_slow_bits = Reg(data) + + val fromhost_q = Module(new Queue(data,1)) + fromhost_q.io.enq.valid := rising && (io.in_slow.valid && in_slow_rdy || this.reset) + fromhost_q.io.enq.bits := io.in_slow.bits + fromhost_q.io.deq <> io.in_fast + + val tohost_q = Module(new Queue(data,1)) + tohost_q.io.enq <> io.out_fast + tohost_q.io.deq.ready := rising && io.out_slow.ready && out_slow_val + + when (held) { + in_slow_rdy := fromhost_q.io.enq.ready + out_slow_val := tohost_q.io.deq.valid + out_slow_bits := Mux(this.reset, 
fromhost_q.io.deq.bits, tohost_q.io.deq.bits) + } + + io.in_slow.ready := in_slow_rdy + io.out_slow.valid := out_slow_val + io.out_slow.bits := out_slow_bits + io.clk_slow := myclock +} From eb57433f430c2cdeacae729577479925ffc69b6c Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 30 Jul 2015 23:56:47 -0700 Subject: [PATCH 005/116] Bits -> UInt --- junctions/src/main/scala/hasti.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 7d8078c5..00c4c963 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -31,7 +31,7 @@ abstract trait HASTIConstants val SZ_HADDR = 32 val SZ_HDATA = 32 - def dgate(valid: Bool, b: Bits) = Fill(b.getWidth, valid) & b + def dgate(valid: Bool, b: UInt) = Fill(b.getWidth, valid) & b } class HASTIMasterIO extends Bundle From d85c46bc600a815f3e83d732f73e9f731fb836bc Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Mon, 3 Aug 2015 19:47:16 -0700 Subject: [PATCH 006/116] Chisel3 bulk connect non-commutativity --- junctions/src/main/scala/memserdes.scala | 4 ++-- junctions/src/main/scala/slowio.scala | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/junctions/src/main/scala/memserdes.scala b/junctions/src/main/scala/memserdes.scala index 1fd72044..bb92690c 100644 --- a/junctions/src/main/scala/memserdes.scala +++ b/junctions/src/main/scala/memserdes.scala @@ -248,7 +248,7 @@ class HellaQueue[T <: Data](val entries: Int)(data: => T) extends Module val io = new QueueIO(data, entries) val fq = Module(new HellaFlowQueue(entries)(data)) - io.enq <> fq.io.enq + fq.io.enq <> io.enq io.deq <> Queue(fq.io.deq, 1, pipe = true) } @@ -303,7 +303,7 @@ class MemIOArbiter(val arbN: Int) extends MIFModule { io.inner(i).resp.bits := io.outer.resp.bits io.inner(i).resp.bits.tag := io.outer.resp.bits.tag >> UInt(log2Up(arbN)) } - } else { io.inner.head <> io.outer } + } else { io.outer <> 
io.inner.head } } object MemIOMemPipeIOConverter { diff --git a/junctions/src/main/scala/slowio.scala b/junctions/src/main/scala/slowio.scala index b7a12226..ae3ef249 100644 --- a/junctions/src/main/scala/slowio.scala +++ b/junctions/src/main/scala/slowio.scala @@ -51,7 +51,7 @@ class SlowIO[T <: Data](val divisor_max: Int)(data: => T) extends Module val fromhost_q = Module(new Queue(data,1)) fromhost_q.io.enq.valid := rising && (io.in_slow.valid && in_slow_rdy || this.reset) fromhost_q.io.enq.bits := io.in_slow.bits - fromhost_q.io.deq <> io.in_fast + io.in_fast <> fromhost_q.io.deq val tohost_q = Module(new Queue(data,1)) tohost_q.io.enq <> io.out_fast From e469785f5eb05ca5685533d18feacdcbfb53808c Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Mon, 3 Aug 2015 19:51:17 -0700 Subject: [PATCH 007/116] bump scala to 2.11.6 --- junctions/build.sbt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/junctions/build.sbt b/junctions/build.sbt index 0bb8c47d..cd79f1ee 100644 --- a/junctions/build.sbt +++ b/junctions/build.sbt @@ -4,7 +4,7 @@ version := "1.0" name := "junctions" -scalaVersion := "2.10.2" +scalaVersion := "2.11.6" // Provide a managed dependency on chisel if -DchiselVersion="" is supplied on the command line. libraryDependencies ++= (Seq("chisel").map { From 2ff2b43c2ceaa8f0b721ae5860c78c7b45958471 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Tue, 4 Aug 2015 13:13:44 -0700 Subject: [PATCH 008/116] Chisel3 compatibility: use >>Int instead of >>UInt The latter doesn't contract widths anymore. 
--- junctions/src/main/scala/slowio.scala | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/junctions/src/main/scala/slowio.scala b/junctions/src/main/scala/slowio.scala index ae3ef249..7e926918 100644 --- a/junctions/src/main/scala/slowio.scala +++ b/junctions/src/main/scala/slowio.scala @@ -24,15 +24,15 @@ class SlowIO[T <: Data](val divisor_max: Int)(data: => T) extends Module d_shadow := io.set_divisor.bits(log2Up(divisor_max)-1, 0).toUInt h_shadow := io.set_divisor.bits(log2Up(divisor_max)-1+16, 16).toUInt } - io.divisor := hold << UInt(16) | divisor + io.divisor := (hold << 16) | divisor val count = Reg{UInt(width = log2Up(divisor_max))} val myclock = Reg{Bool()} count := count + UInt(1) - val rising = count === (divisor >> UInt(1)) + val rising = count === (divisor >> 1) val falling = count === divisor - val held = count === (divisor >> UInt(1)) + hold + val held = count === (divisor >> 1) + hold when (falling) { divisor := d_shadow From 3a1dad79948290cbbe77e4fb3713d0dba8c87749 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 27 Aug 2015 09:40:24 -0700 Subject: [PATCH 009/116] Use Vec.apply, not Vec.fill, for type nodes --- junctions/src/main/scala/hasti.scala | 6 +++--- junctions/src/main/scala/memserdes.scala | 2 +- junctions/src/main/scala/poci.scala | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 00c4c963..7b3380ac 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -74,7 +74,7 @@ class HASTIBus(amap: Seq[UInt=>Bool]) extends Module { val io = new Bundle { val master = new HASTIMasterIO().flip - val slaves = Vec.fill(amap.size){new HASTISlaveIO}.flip + val slaves = Vec(new HASTISlaveIO, amap.size).flip } // skid buffer @@ -215,8 +215,8 @@ class HASTISlaveMux(n: Int) extends Module class HASTIXbar(nMasters: Int, addressMap: Seq[UInt=>Bool]) extends Module { val io = new Bundle { - 
val masters = Vec.fill(nMasters){new HASTIMasterIO}.flip - val slaves = Vec.fill(addressMap.size){new HASTISlaveIO}.flip + val masters = Vec(new HASTIMasterIO, nMasters).flip + val slaves = Vec(new HASTISlaveIO, addressMap.size).flip } val buses = List.fill(nMasters){Module(new HASTIBus(addressMap))} diff --git a/junctions/src/main/scala/memserdes.scala b/junctions/src/main/scala/memserdes.scala index bb92690c..78212952 100644 --- a/junctions/src/main/scala/memserdes.scala +++ b/junctions/src/main/scala/memserdes.scala @@ -265,7 +265,7 @@ object HellaQueue class MemIOArbiter(val arbN: Int) extends MIFModule { val io = new Bundle { - val inner = Vec.fill(arbN){new MemIO}.flip + val inner = Vec(new MemIO, arbN).flip val outer = new MemIO } diff --git a/junctions/src/main/scala/poci.scala b/junctions/src/main/scala/poci.scala index f6eaece2..bfd581c7 100644 --- a/junctions/src/main/scala/poci.scala +++ b/junctions/src/main/scala/poci.scala @@ -66,7 +66,7 @@ class POCIBus(amap: Seq[UInt=>Bool]) extends Module { val io = new Bundle { val master = new POCIIO().flip - val slaves = Vec.fill(amap.size){new POCIIO} + val slaves = Vec(new POCIIO, amap.size) } val psels = PriorityEncoderOH( From f7d9628de2477d3916859b0faaacf66a2136d6b3 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 27 Aug 2015 09:40:52 -0700 Subject: [PATCH 010/116] Avoid needless use of Vec --- junctions/src/main/scala/hasti.scala | 38 ++++++++++++++-------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 7b3380ac..2e3b1ff7 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -115,7 +115,7 @@ class HASTIBus(amap: Seq[UInt=>Bool]) extends Module s.hreadyin := skb_valid || io.master.hready } } - val s1_hsels = Vec.fill(amap.size){Reg(init = Bool(false))} + val s1_hsels = Array.fill(amap.size){Reg(init = Bool(false))} val hreadyouts = 
io.slaves.map(_.hreadyout) val master_hready = s1_hsels.reduce(_||_) === Bool(false) || Mux1H(s1_hsels, hreadyouts) @@ -145,39 +145,39 @@ class HASTIBus(amap: Seq[UInt=>Bool]) extends Module class HASTISlaveMux(n: Int) extends Module { val io = new Bundle { - val ins = Vec.fill(n){new HASTISlaveIO} + val ins = Vec(new HASTISlaveIO, n) val out = new HASTISlaveIO().flip } // skid buffers - val skb_valid = Vec.fill(n){Reg(init = Bool(false))} - val skb_haddr = Vec.fill(n){Reg(UInt(width = SZ_HADDR))} - val skb_hwrite = Vec.fill(n){Reg(Bool())} - val skb_hsize = Vec.fill(n){Reg(UInt(width = SZ_HSIZE))} - val skb_hburst = Vec.fill(n){Reg(UInt(width = SZ_HBURST))} - val skb_hprot = Vec.fill(n){Reg(UInt(width = SZ_HPROT))} - val skb_htrans = Vec.fill(n){Reg(UInt(width = SZ_HTRANS))} - val skb_hmastlock = Vec.fill(n){Reg(Bool())} + val skb_valid = Array.fill(n){Reg(init = Bool(false))} + val skb_haddr = Array.fill(n){Reg(UInt(width = SZ_HADDR))} + val skb_hwrite = Array.fill(n){Reg(Bool())} + val skb_hsize = Array.fill(n){Reg(UInt(width = SZ_HSIZE))} + val skb_hburst = Array.fill(n){Reg(UInt(width = SZ_HBURST))} + val skb_hprot = Array.fill(n){Reg(UInt(width = SZ_HPROT))} + val skb_htrans = Array.fill(n){Reg(UInt(width = SZ_HTRANS))} + val skb_hmastlock = Array.fill(n){Reg(Bool())} val requests = (io.ins zip skb_valid) map { case (in, v) => in.hsel && in.hreadyin || v } val grants = PriorityEncoderOH(requests) - val s1_grants = Vec.fill(n){Reg(init = Bool(true))} + val s1_grants = Array.fill(n){Reg(init = Bool(true))} (s1_grants zip grants) foreach { case (g1, g) => when (io.out.hreadyout) { g1 := g } } - def sel[T <: Data](in: Vec[T], s1: Vec[T]) = + def sel[T <: Data](in: Seq[T], s1: Seq[T]) = Vec((skb_valid zip s1 zip in) map { case ((v, s), in) => Mux(v, s, in) }) - io.out.haddr := Mux1H(grants, sel(Vec(io.ins.map(_.haddr)), skb_haddr)) - io.out.hwrite := Mux1H(grants, sel(Vec(io.ins.map(_.hwrite)), skb_hwrite)) - io.out.hsize := Mux1H(grants, 
sel(Vec(io.ins.map(_.hsize)), skb_hsize)) - io.out.hburst := Mux1H(grants, sel(Vec(io.ins.map(_.hburst)), skb_hburst)) - io.out.hprot := Mux1H(grants, sel(Vec(io.ins.map(_.hprot)), skb_hprot)) - io.out.htrans := Mux1H(grants, sel(Vec(io.ins.map(_.htrans)), skb_htrans)) - io.out.hmastlock := Mux1H(grants, sel(Vec(io.ins.map(_.hmastlock)), skb_hmastlock)) + io.out.haddr := Mux1H(grants, sel(io.ins.map(_.haddr), skb_haddr)) + io.out.hwrite := Mux1H(grants, sel(io.ins.map(_.hwrite), skb_hwrite)) + io.out.hsize := Mux1H(grants, sel(io.ins.map(_.hsize), skb_hsize)) + io.out.hburst := Mux1H(grants, sel(io.ins.map(_.hburst), skb_hburst)) + io.out.hprot := Mux1H(grants, sel(io.ins.map(_.hprot), skb_hprot)) + io.out.htrans := Mux1H(grants, sel(io.ins.map(_.htrans), skb_htrans)) + io.out.hmastlock := Mux1H(grants, sel(io.ins.map(_.hmastlock), skb_hmastlock)) io.out.hsel := grants.reduce(_||_) (io.ins zipWithIndex) map { case (in, i) => { From b046c57284e1fdfa3105ace43434aa8c16fd8529 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Thu, 6 Aug 2015 12:46:32 -0700 Subject: [PATCH 011/116] make NASTI -> MemIO converter compliant to AXI4 spec --- junctions/src/main/scala/nasti.scala | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index c66c9526..9eea6735 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -114,17 +114,27 @@ class MemIONASTISlaveIOConverter(cacheBlockOffsetBits: Int) extends MIFModule wi require(mifDataBits == nastiXDataBits, "Data sizes between LLC and MC don't agree") val (mif_cnt_out, mif_wrap_out) = Counter(io.mem.resp.fire(), mifDataBeats) - + + // according to the spec, we can't send b until the last transfer on w + val b_ok = Reg(init = Bool(true)) + when (io.nasti.aw.fire()) { b_ok := Bool(false) } + when (io.nasti.w.fire() && io.nasti.w.bits.last) { b_ok := Bool(true) } + + val id_q = Module(new 
Queue(UInt(width = nastiWIdBits), 2)) + id_q.io.enq.valid := io.nasti.aw.valid + id_q.io.enq.bits := io.nasti.aw.bits.id + id_q.io.deq.ready := io.nasti.b.ready && b_ok + io.mem.req_cmd.bits.addr := Mux(io.nasti.aw.valid, io.nasti.aw.bits.addr, io.nasti.ar.bits.addr) >> UInt(cacheBlockOffsetBits) io.mem.req_cmd.bits.tag := Mux(io.nasti.aw.valid, io.nasti.aw.bits.id, io.nasti.ar.bits.id) io.mem.req_cmd.bits.rw := io.nasti.aw.valid - io.mem.req_cmd.valid := (io.nasti.aw.valid && io.nasti.b.ready) || io.nasti.ar.valid + io.mem.req_cmd.valid := (io.nasti.aw.valid && id_q.io.enq.ready) || io.nasti.ar.valid io.nasti.ar.ready := io.mem.req_cmd.ready && !io.nasti.aw.valid - io.nasti.aw.ready := io.mem.req_cmd.ready && io.nasti.b.ready + io.nasti.aw.ready := io.mem.req_cmd.ready && id_q.io.enq.ready - io.nasti.b.valid := io.nasti.aw.valid && io.mem.req_cmd.ready - io.nasti.b.bits.id := io.nasti.aw.bits.id + io.nasti.b.valid := id_q.io.deq.valid && b_ok + io.nasti.b.bits.id := id_q.io.deq.bits io.nasti.b.bits.resp := UInt(0) io.nasti.w.ready := io.mem.req_data.ready From 75ec7529af8a5e23e9d613f452be8c8e8726ce53 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Thu, 6 Aug 2015 12:48:35 -0700 Subject: [PATCH 012/116] implement NASTI Interconnect generating from configuration address map --- junctions/src/main/scala/nasti.scala | 401 ++++++++++++++++++++++++++- 1 file changed, 400 insertions(+), 1 deletion(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 9eea6735..bddf5b0e 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -1,13 +1,20 @@ -// See LICENSE for license details. +/// See LICENSE for license details. 
package junctions import Chisel._ import scala.math.max +import scala.collection.mutable.ArraySeq +import scala.collection.mutable.HashMap +case object MMIOBase extends Field[BigInt] case object NASTIDataBits extends Field[Int] case object NASTIAddrBits extends Field[Int] case object NASTIIdBits extends Field[Int] +object bigIntPow2 { + def apply(in: BigInt): Boolean = in > 0 && ((in & (in-1)) == 0) +} + trait NASTIParameters extends UsesParameters { val nastiXDataBits = params(NASTIDataBits) val nastiWStrobeBits = nastiXDataBits / 8 @@ -149,3 +156,395 @@ class MemIONASTISlaveIOConverter(cacheBlockOffsetBits: Int) extends MIFModule wi io.nasti.r.bits.resp := UInt(0) io.mem.resp.ready := io.nasti.r.ready } + +class NASTIArbiter(val arbN: Int) extends NASTIModule { + val io = new Bundle { + val master = Vec.fill(arbN) { new NASTISlaveIO } + val slave = new NASTIMasterIO + } + + if (arbN > 1) { + val arbIdBits = log2Up(arbN) + + val ar_arb = Module(new RRArbiter(new NASTIReadAddressChannel, arbN)) + val aw_arb = Module(new RRArbiter(new NASTIWriteAddressChannel, arbN)) + + val slave_r_arb_id = io.slave.r.bits.id(arbIdBits - 1, 0) + val slave_b_arb_id = io.slave.b.bits.id(arbIdBits - 1, 0) + + val w_chosen = Reg(UInt(width = arbIdBits)) + val w_done = Reg(init = Bool(true)) + + when (aw_arb.io.out.fire()) { + w_chosen := aw_arb.io.chosen + w_done := Bool(false) + } + + when (io.slave.w.fire() && io.slave.w.bits.last) { + w_done := Bool(true) + } + + for (i <- 0 until arbN) { + val m_ar = io.master(i).ar + val m_aw = io.master(i).aw + val m_r = io.master(i).r + val m_b = io.master(i).b + val a_ar = ar_arb.io.in(i) + val a_aw = aw_arb.io.in(i) + val m_w = io.master(i).w + + a_ar <> m_ar + a_ar.bits.id := Cat(m_ar.bits.id, UInt(i, arbIdBits)) + + a_aw <> m_aw + a_aw.bits.id := Cat(m_aw.bits.id, UInt(i, arbIdBits)) + + m_r.valid := io.slave.r.valid && slave_r_arb_id === UInt(i) + m_r.bits := io.slave.r.bits + m_r.bits.id := io.slave.r.bits.id >> UInt(arbIdBits) + + 
m_b.valid := io.slave.b.valid && slave_b_arb_id === UInt(i) + m_b.bits := io.slave.b.bits + m_b.bits.id := io.slave.b.bits.id >> UInt(arbIdBits) + + m_w.ready := io.slave.w.ready && w_chosen === UInt(i) && !w_done + } + + io.slave.r.ready := io.master(slave_r_arb_id).r.ready + io.slave.b.ready := io.master(slave_b_arb_id).b.ready + + io.slave.w.bits := io.master(w_chosen).w.bits + io.slave.w.valid := io.master(w_chosen).w.valid && !w_done + + io.slave.ar <> ar_arb.io.out + io.slave.aw <> aw_arb.io.out + aw_arb.io.out.ready := io.slave.aw.ready && w_done + + } else { io.slave <> io.master.head } +} + +// TODO: More efficient implementation a/la Chisel Stdlib +class NASTIReadDataArbiter(arbN: Int) extends NASTIModule { + val io = new Bundle { + val in = Vec.fill(arbN) { Decoupled(new NASTIReadDataChannel) }.flip + val out = Decoupled(new NASTIReadDataChannel) + } + + def rotateLeft[T <: Data](norm: Vec[T], rot: UInt): Vec[T] = { + val n = norm.size + Vec.tabulate(n) { i => + Mux(rot < UInt(n - i), norm(UInt(i) + rot), norm(rot - UInt(n - i))) + } + } + + val lockIdx = Reg(init = UInt(0, log2Up(arbN))) + val locked = Reg(init = Bool(false)) + + // use rotation to give priority to the input after the last one granted + val choice = PriorityMux( + rotateLeft(Vec(io.in.map(_.valid)), lockIdx + UInt(1)), + rotateLeft(Vec((0 until arbN).map(UInt(_))), lockIdx + UInt(1))) + + val chosen = Mux(locked, lockIdx, choice) + + for (i <- 0 until arbN) { + io.in(i).ready := io.out.ready && chosen === UInt(i) + } + + io.out.valid := io.in(chosen).valid + io.out.bits := io.in(chosen).bits + + when (io.out.fire()) { + when (!locked) { + lockIdx := choice + locked := !io.out.bits.last + } .elsewhen (io.out.bits.last) { + locked := Bool(false) + } + } +} + +/** A slave that send decode error for every request it receives */ +class NASTIErrorSlave extends NASTIModule { + val io = new NASTISlaveIO + + val r_queue = Module(new Queue(UInt(width = nastiRIdBits), 2)) + r_queue.io.enq.valid := 
io.ar.valid + r_queue.io.enq.bits := io.ar.bits.id + io.ar.ready := r_queue.io.enq.ready + io.r.valid := r_queue.io.deq.valid + io.r.bits.id := r_queue.io.deq.bits + io.r.bits.resp := Bits("b11") + io.r.bits.last := Bool(true) + r_queue.io.deq.ready := io.r.ready + + val draining = Reg(init = Bool(false)) + io.w.ready := draining + + when (io.aw.fire()) { draining := Bool(true) } + when (io.w.fire() && io.w.bits.last) { draining := Bool(false) } + + val b_queue = Module(new Queue(UInt(width = nastiWIdBits), 2)) + b_queue.io.enq.valid := io.aw.valid && !draining + b_queue.io.enq.bits := io.aw.bits.id + io.aw.ready := b_queue.io.enq.ready && !draining + io.b.valid := b_queue.io.deq.valid && !draining + io.b.bits.id := b_queue.io.deq.bits + io.b.bits.resp := Bits("b11") + b_queue.io.deq.ready := io.b.ready && !draining +} + +class NASTIRouter(addrmap: Seq[(BigInt, BigInt)]) extends NASTIModule { + val nSlaves = addrmap.size + + val io = new Bundle { + val master = new NASTISlaveIO + val slave = Vec.fill(nSlaves) { new NASTIMasterIO } + } + + var ar_ready = Bool(false) + var aw_ready = Bool(false) + var w_ready = Bool(false) + var r_valid_addr = Bool(false) + var w_valid_addr = Bool(false) + + addrmap.zip(io.slave).zipWithIndex.foreach { case (((base, size), s), i) => + val bound = base + size + + require(bigIntPow2(size), + s"Region size $size is not a power of 2") + require(base % size == 0, + f"Region base address $base%x not divisible by $size%d" ) + + val ar_addr = io.master.ar.bits.addr + val ar_match = ar_addr >= UInt(base) && ar_addr < UInt(bound) + + s.ar.valid := io.master.ar.valid && ar_match + s.ar.bits := io.master.ar.bits + ar_ready = ar_ready || (s.ar.ready && ar_match) + r_valid_addr = r_valid_addr || ar_match + + val aw_addr = io.master.aw.bits.addr + val aw_match = aw_addr >= UInt(base) && aw_addr < UInt(bound) + + s.aw.valid := io.master.aw.valid && aw_match + s.aw.bits := io.master.aw.bits + aw_ready = aw_ready || (s.aw.ready && aw_match) + 
w_valid_addr = w_valid_addr || aw_match + + val chosen = Reg(init = Bool(false)) + when (s.aw.fire()) { chosen := Bool(true) } + when (s.w.fire() && s.w.bits.last) { chosen := Bool(false) } + + s.w.valid := io.master.w.valid && chosen + s.w.bits := io.master.w.bits + w_ready = w_ready || (s.w.ready && chosen) + } + + val err_slave = Module(new NASTIErrorSlave) + err_slave.io.ar.valid := !r_valid_addr && io.master.ar.valid + err_slave.io.ar.bits := io.master.ar.bits + err_slave.io.aw.valid := !w_valid_addr && io.master.aw.valid + err_slave.io.aw.bits := io.master.aw.bits + err_slave.io.w.valid := io.master.w.valid + err_slave.io.w.bits := io.master.w.bits + + io.master.ar.ready := ar_ready || (!r_valid_addr && err_slave.io.ar.ready) + io.master.aw.ready := aw_ready || (!w_valid_addr && err_slave.io.aw.ready) + io.master.w.ready := w_ready || err_slave.io.w.ready + + val b_arb = Module(new RRArbiter(new NASTIWriteResponseChannel, nSlaves + 1)) + val r_arb = Module(new NASTIReadDataArbiter(nSlaves + 1)) + + for (i <- 0 until nSlaves) { + b_arb.io.in(i) <> io.slave(i).b + r_arb.io.in(i) <> io.slave(i).r + } + + b_arb.io.in(nSlaves) <> err_slave.io.b + r_arb.io.in(nSlaves) <> err_slave.io.r + + io.master.b <> b_arb.io.out + io.master.r <> r_arb.io.out +} + +class NASTICrossbar(nMasters: Int, nSlaves: Int, addrmap: Seq[(BigInt, BigInt)]) + extends NASTIModule { + val io = new Bundle { + val masters = Vec.fill(nMasters) { new NASTISlaveIO } + val slaves = Vec.fill(nSlaves) { new NASTIMasterIO } + } + + val routers = Vec.fill(nMasters) { Module(new NASTIRouter(addrmap)).io } + val arbiters = Vec.fill(nSlaves) { Module(new NASTIArbiter(nMasters)).io } + + for (i <- 0 until nMasters) { + routers(i).master <> io.masters(i) + } + + for (i <- 0 until nSlaves) { + arbiters(i).master <> Vec(routers.map(r => r.slave(i))) + io.slaves(i) <> arbiters(i).slave + } +} + +case object NASTINMasters extends Field[Int] +case object NASTINSlaves extends Field[Int] + +object AddrMapTypes { + 
type AddrMapEntry = (String, Option[BigInt], MemRegion) + type AddrMap = Seq[AddrMapEntry] +} +import AddrMapTypes._ + +abstract class MemRegion { def size: BigInt } + +case class MemSize(size: BigInt) extends MemRegion +case class MemSubmap(size: BigInt, entries: AddrMap) extends MemRegion + +object Submap { + def apply(size: BigInt, entries: AddrMapEntry*) = + new MemSubmap(size, entries) +} + +case class AddrHashMapEntry(port: Int, start: BigInt, size: BigInt) + +class AddrHashMap(addrmap: AddrMap) { + val mapping = new HashMap[String, AddrHashMapEntry] + + private def genPairs(addrmap: AddrMap): Seq[(String, AddrHashMapEntry)] = { + var ind = 0 + var base = BigInt(0) + var pairs = Seq[(String, AddrHashMapEntry)]() + addrmap.foreach { case (name, startOpt, region) => + region match { + case MemSize(size) => { + if (!startOpt.isEmpty) base = startOpt.get + pairs = (name, AddrHashMapEntry(ind, base, size)) +: pairs + base += size + ind += 1 + } + case MemSubmap(size, submap) => { + if (!startOpt.isEmpty) base = startOpt.get + val subpairs = genPairs(submap).map { + case (subname, AddrHashMapEntry(subind, subbase, subsize)) => + (name + ":" + subname, + AddrHashMapEntry(ind + subind, base + subbase, subsize)) + } + pairs = subpairs ++ pairs + ind += subpairs.size + base += size + } + } + } + pairs + } + + for ((name, ind) <- genPairs(addrmap)) { mapping(name) = ind } + + def nEntries: Int = mapping.size + def apply(name: String): AddrHashMapEntry = mapping(name) + def get(name: String): Option[AddrHashMapEntry] = mapping.get(name) + def sortedEntries(): Seq[(String, BigInt, BigInt)] = { + val arr = new Array[(String, BigInt, BigInt)](mapping.size) + mapping.foreach { case (name, AddrHashMapEntry(port, base, size)) => + arr(port) = (name, base, size) + } + arr.toSeq + } +} + +case object NASTIAddrMap extends Field[AddrMap] +case object NASTIAddrHashMap extends Field[AddrHashMap] + +class NASTIInterconnectIO(val nMasters: Int, val nSlaves: Int) extends Bundle { + /* 
This is a bit confusing. The interconnect is a slave to the masters and + * a master to the slaves. Hence why the declarations seem to be backwards. */ + val masters = Vec.fill(nMasters) { new NASTISlaveIO } + val slaves = Vec.fill(nSlaves) { new NASTIMasterIO } + override def cloneType = + new NASTIInterconnectIO(nMasters, nSlaves).asInstanceOf[this.type] +} + +abstract class NASTIInterconnect extends NASTIModule { + val nMasters: Int + val nSlaves: Int + + lazy val io = new NASTIInterconnectIO(nMasters, nSlaves) +} + +class NASTIRecursiveInterconnect( + val nMasters: Int, val nSlaves: Int, + addrmap: AddrMap, base: BigInt = 0) extends NASTIInterconnect { + + private def mapCountSlaves(addrmap: AddrMap): Int = { + addrmap.map { + case (_, _, MemSize(_)) => 1 + case (_, _, MemSubmap(_, submap)) => mapCountSlaves(submap) + }.reduceLeft(_ + _) + } + + var lastEnd = base + var slaveInd = 0 + val levelSize = addrmap.size + + val realAddrMap = new ArraySeq[(BigInt, BigInt)](addrmap.size) + + addrmap.zipWithIndex.foreach { case ((_, startOpt, region), i) => + val start = startOpt.getOrElse(lastEnd) + val size = region.size + realAddrMap(i) = (start, size) + lastEnd = start + size + } + + val flatSlaves = if (nMasters > 1) { + val xbar = Module(new NASTICrossbar(nMasters, levelSize, realAddrMap)) + xbar.io.masters <> io.masters + xbar.io.slaves + } else { + val router = Module(new NASTIRouter(realAddrMap)) + router.io.master <> io.masters.head + router.io.slave + } + + addrmap.zip(realAddrMap).zipWithIndex.foreach { + case (((_, _, region), (start, size)), i) => { + region match { + case MemSize(_) => + io.slaves(slaveInd) <> flatSlaves(i) + slaveInd += 1 + case MemSubmap(_, submap) => + val subSlaves = mapCountSlaves(submap) + val ic = Module(new NASTIRecursiveInterconnect( + 1, subSlaves, submap, start)) + ic.io.masters.head <> flatSlaves(i) + io.slaves.drop(slaveInd).take(subSlaves).zip(ic.io.slaves).foreach { + case (s, m) => s <> m + } + slaveInd += subSlaves + } + } 
+ } +} + +class NASTITopInterconnect extends NASTIInterconnect { + val nMasters = params(NASTINMasters) + val nSlaves = params(NASTINSlaves) + + bigIntPow2(params(MMIOBase)) + + val temp = Module(new NASTIRecursiveInterconnect( + nMasters, nSlaves, params(NASTIAddrMap))) + + temp.io.masters.zip(io.masters).foreach { case (t, i) => + t.ar <> i.ar + t.aw <> i.aw + // this queue is necessary to break up the aw - w dependence + // introduced by the TileLink -> NASTI converter + t.w <> Queue(i.w) + i.b <> t.b + i.r <> t.r + } + //temp.io.masters <> io.masters + io.slaves <> temp.io.slaves +} From ede1ada05322aaa36f58fa172007188f33cade40 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Mon, 10 Aug 2015 19:00:51 -0700 Subject: [PATCH 013/116] Add converters and utilities for simpler peripheral interface (SMI) --- junctions/src/main/scala/smi.scala | 269 +++++++++++++++++++++++++++++ 1 file changed, 269 insertions(+) create mode 100644 junctions/src/main/scala/smi.scala diff --git a/junctions/src/main/scala/smi.scala b/junctions/src/main/scala/smi.scala new file mode 100644 index 00000000..f3495ccc --- /dev/null +++ b/junctions/src/main/scala/smi.scala @@ -0,0 +1,269 @@ +package junctions + +import Chisel._ + +class SMIReq(val dataWidth: Int, val addrWidth: Int) extends Bundle { + val rw = Bool() + val addr = UInt(width = addrWidth) + val data = Bits(width = dataWidth) + + override def cloneType = + new SMIReq(dataWidth, addrWidth).asInstanceOf[this.type] +} + +class SMIIO(val dataWidth: Int, val addrWidth: Int) extends Bundle { + val req = Decoupled(new SMIReq(dataWidth, addrWidth)) + val resp = Decoupled(Bits(width = dataWidth)).flip + + override def cloneType = + new SMIIO(dataWidth, addrWidth).asInstanceOf[this.type] +} + +abstract class SMIPeripheral extends Module { + val dataWidth: Int + val addrWidth: Int + + lazy val io = new SMIIO(dataWidth, addrWidth).flip +} + +class SMIMem(val dataWidth: Int, val memDepth: Int) extends SMIPeripheral { + // override + val 
addrWidth = log2Up(memDepth) + + val mem = SeqMem(Bits(width = dataWidth), memDepth) + + val ren = io.req.fire() && !io.req.bits.rw + val wen = io.req.fire() && io.req.bits.rw + + io.resp.valid := Reg(next = ren) + io.resp.bits := mem.read(io.req.bits.addr, ren) + + when (wen) { mem.write(io.req.bits.addr, io.req.bits.data) } + + val resp_valid = Reg(init = Bool(false)) + + when (io.resp.fire()) { resp_valid := Bool(false) } + when (io.req.fire()) { resp_valid := Bool(true) } + + io.resp.valid := resp_valid + io.req.ready := !resp_valid +} + +class SMIArbiter(val n: Int, val dataWidth: Int, val addrWidth: Int) + extends Module { + val io = new Bundle { + val in = Vec.fill(n) { new SMIIO(dataWidth, addrWidth) }.flip + val out = new SMIIO(dataWidth, addrWidth) + } + + val wait_resp = Reg(init = Bool(false)) + val choice = Reg(UInt(width = log2Up(n))) + + val req_arb = Module(new RRArbiter(new SMIReq(dataWidth, addrWidth), n)) + req_arb.io.in <> io.in.map(_.req) + req_arb.io.out.ready := io.out.req.ready && !wait_resp + + io.out.req.bits := req_arb.io.out.bits + io.out.req.valid := req_arb.io.out.valid && !wait_resp + + when (io.out.req.fire()) { + choice := req_arb.io.chosen + wait_resp := Bool(true) + } + + when (io.out.resp.fire()) { wait_resp := Bool(false) } + + for ((resp, i) <- io.in.map(_.resp).zipWithIndex) { + resp.bits := io.out.resp.bits + resp.valid := io.out.resp.valid && choice === UInt(i) + } + + io.out.resp.ready := io.in(choice).resp.ready +} + +class SMIIONASTIReadIOConverter(val dataWidth: Int, val addrWidth: Int) + extends NASTIModule { + val io = new Bundle { + val ar = Decoupled(new NASTIReadAddressChannel).flip + val r = Decoupled(new NASTIReadDataChannel) + val smi = new SMIIO(dataWidth, addrWidth) + } + + private val maxWordsPerBeat = nastiXDataBits / dataWidth + private val wordCountBits = log2Up(maxWordsPerBeat) + private val byteOffBits = log2Up(dataWidth / 8) + private val addrOffBits = addrWidth + byteOffBits + + private def 
calcWordCount(size: UInt): UInt = + (UInt(1) << (size - UInt(byteOffBits))) - UInt(1) + + val (s_idle :: s_read :: s_resp :: Nil) = Enum(Bits(), 3) + val state = Reg(init = s_idle) + + val nWords = Reg(UInt(width = wordCountBits)) + val nBeats = Reg(UInt(width = nastiXLenBits)) + val addr = Reg(UInt(width = addrWidth)) + val id = Reg(UInt(width = nastiRIdBits)) + + val byteOff = Reg(UInt(width = byteOffBits)) + val sendInd = Reg(init = UInt(0, wordCountBits)) + val recvInd = Reg(init = UInt(0, wordCountBits)) + val sendDone = Reg(init = Bool(false)) + + val buffer = Reg(init = Vec.fill(maxWordsPerBeat) { Bits(0, dataWidth) }) + + io.ar.ready := (state === s_idle) + + io.smi.req.valid := (state === s_read) && !sendDone + io.smi.req.bits.rw := Bool(false) + io.smi.req.bits.addr := addr + + io.smi.resp.ready := (state === s_read) + + io.r.valid := (state === s_resp) + io.r.bits.resp := Bits(0) + io.r.bits.data := buffer.toBits + io.r.bits.id := id + io.r.bits.last := (nBeats === UInt(0)) + + when (io.ar.fire()) { + when (io.ar.bits.size < UInt(byteOffBits)) { + nWords := UInt(0) + byteOff := io.ar.bits.addr(byteOffBits - 1, 0) + } .otherwise { + nWords := calcWordCount(io.ar.bits.size) + byteOff := UInt(0) + } + nBeats := io.ar.bits.len + addr := io.ar.bits.addr(addrOffBits - 1, byteOffBits) + id := io.ar.bits.id + state := s_read + } + + when (io.smi.req.fire()) { + addr := addr + UInt(1) + sendInd := sendInd + UInt(1) + sendDone := (sendInd === nWords) + } + + when (io.smi.resp.fire()) { + recvInd := recvInd + UInt(1) + buffer(recvInd) := io.smi.resp.bits >> Cat(byteOff, UInt(0, 3)) + when (recvInd === nWords) { state := s_resp } + } + + when (io.r.fire()) { + recvInd := UInt(0) + sendInd := UInt(0) + sendDone := Bool(false) + // clear all the registers in the buffer + buffer.foreach(_ := Bits(0)) + nBeats := nBeats - UInt(1) + state := Mux(io.r.bits.last, s_idle, s_read) + } +} + +class SMIIONASTIWriteIOConverter(val dataWidth: Int, val addrWidth: Int) + extends 
NASTIModule { + val io = new Bundle { + val aw = Decoupled(new NASTIWriteAddressChannel).flip + val w = Decoupled(new NASTIWriteDataChannel).flip + val b = Decoupled(new NASTIWriteResponseChannel) + val smi = new SMIIO(dataWidth, addrWidth) + } + + private val dataBytes = dataWidth / 8 + private val maxWordsPerBeat = nastiXDataBits / dataWidth + private val byteOffBits = log2Floor(dataBytes) + private val addrOffBits = addrWidth + byteOffBits + + assert(!io.aw.valid || io.aw.bits.size >= UInt(byteOffBits), + "NASTI size must be >= SMI size") + + val id = Reg(UInt(width = nastiWIdBits)) + val addr = Reg(UInt(width = addrWidth)) + + def makeStrobe(size: UInt, strb: UInt) = { + val sizemask = (UInt(1) << (UInt(1) << size)) - UInt(1) + val bytemask = sizemask & strb + Vec.tabulate(maxWordsPerBeat){i => bytemask(dataBytes * i)}.toBits + //val strbmask = Vec.tabulate(maxWordsPerBeat){i => strb(dataBytes * i)}.toBits + //sizemask & strbmask + } + + val size = Reg(UInt(width = nastiXSizeBits)) + val strb = Reg(UInt(width = maxWordsPerBeat)) + val data = Reg(UInt(width = nastiXDataBits)) + val last = Reg(Bool()) + + val s_idle :: s_data :: s_send :: s_ack :: s_resp :: Nil = Enum(Bits(), 5) + val state = Reg(init = s_idle) + + io.aw.ready := (state === s_idle) + io.w.ready := (state === s_data) + io.smi.req.valid := (state === s_send) && strb(0) + io.smi.req.bits.rw := Bool(true) + io.smi.req.bits.addr := addr + io.smi.req.bits.data := data(dataWidth - 1, 0) + io.smi.resp.ready := (state === s_ack) + io.b.valid := (state === s_resp) + io.b.bits.resp := Bits(0) + io.b.bits.id := id + + val jump = PriorityMux(strb(maxWordsPerBeat - 1, 1), + (1 until maxWordsPerBeat).map(UInt(_))) + + when (io.aw.fire()) { + addr := io.aw.bits.addr(addrOffBits - 1, byteOffBits) + id := io.aw.bits.id + //size := io.aw.bits.size - UInt(byteOffBits) + size := io.aw.bits.size + last := Bool(false) + state := s_data + } + + when (io.w.fire()) { + last := io.w.bits.last + strb := makeStrobe(size, 
io.w.bits.strb) + data := io.w.bits.data + state := s_send + } + + when (state === s_send) { + when (strb === UInt(0)) { + state := Mux(last, s_ack, s_data) + } .elsewhen (io.smi.req.ready || !strb(0)) { + strb := strb >> jump + data := data >> Cat(jump, UInt(0, log2Up(dataWidth))) + addr := addr + jump + } + } + + when (io.smi.resp.fire()) { state := s_resp } + + when (io.b.fire()) { state := s_idle } +} + +class SMIIONASTISlaveIOConverter(val dataWidth: Int, val addrWidth: Int) + extends NASTIModule { + val io = new Bundle { + val nasti = new NASTISlaveIO + val smi = new SMIIO(dataWidth, addrWidth) + } + + require(isPow2(dataWidth), "SMI data width must be power of 2") + + val reader = Module(new SMIIONASTIReadIOConverter(dataWidth, addrWidth)) + reader.io.ar <> io.nasti.ar + io.nasti.r <> reader.io.r + + val writer = Module(new SMIIONASTIWriteIOConverter(dataWidth, addrWidth)) + writer.io.aw <> io.nasti.aw + writer.io.w <> io.nasti.w + io.nasti.b <> writer.io.b + + val arb = Module(new SMIArbiter(2, dataWidth, addrWidth)) + arb.io.in(0) <> reader.io.smi + arb.io.in(1) <> writer.io.smi + io.smi <> arb.io.out +} From 8a8d52da4f2da4c01d40e54390eddd9b807f9724 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Thu, 10 Sep 2015 17:32:40 -0700 Subject: [PATCH 014/116] add convenient constructors for NASTI channels --- junctions/src/main/scala/nasti.scala | 69 ++++++++++++++++++++++++++++ junctions/src/main/scala/smi.scala | 11 ++--- 2 files changed, 74 insertions(+), 6 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index bddf5b0e..26d198b9 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -113,6 +113,75 @@ class NASTIReadDataChannel extends NASTIResponseChannel with HasNASTIData { val user = UInt(width = nastiRUserBits) } +object NASTIWriteAddressChannel { + def apply(id: UInt, addr: UInt, size: UInt, len: UInt) = { + val aw = Wire(new NASTIWriteAddressChannel) + aw.id := 
id + aw.addr := addr + aw.len := len + aw.size := size + aw.burst := UInt("b01") + aw.lock := Bool(false) + aw.cache := UInt("b0000") + aw.prot := UInt("b000") + aw.qos := UInt("b0000") + aw.region := UInt("b0000") + aw.user := UInt(0) + aw + } +} + +object NASTIReadAddressChannel { + def apply(id: UInt, addr: UInt, size: UInt, len: UInt) = { + val ar = Wire(new NASTIReadAddressChannel) + ar.id := id + ar.addr := addr + ar.len := len + ar.size := size + ar.burst := UInt("b01") + ar.lock := Bool(false) + ar.cache := UInt(0) + ar.prot := UInt(0) + ar.qos := UInt(0) + ar.region := UInt(0) + ar.user := UInt(0) + ar + } +} + +object NASTIWriteDataChannel { + def apply(strb: UInt, data: UInt, last: Bool) = { + val w = Wire(new NASTIWriteDataChannel) + w.strb := strb + w.data := data + w.last := last + w.user := UInt(0) + w + } +} + +object NASTIReadDataChannel { + def apply(id: UInt, data: UInt, last: Bool, resp: UInt = UInt(0)) = { + val r = Wire(new NASTIReadDataChannel) + r.id := id + r.data := data + r.last := last + r.resp := resp + r.user := UInt(0) + r + } +} + +object NASTIWriteResponseChannel { + def apply(id: UInt, resp: UInt = UInt(0)) = { + val b = Wire(new NASTIWriteResponseChannel) + b.id := id + b.resp := resp + b.user := UInt(0) + b + } +} + class MemIONASTISlaveIOConverter(cacheBlockOffsetBits: Int) extends MIFModule with NASTIParameters { val io = new Bundle { val nasti = new NASTISlaveIO diff --git a/junctions/src/main/scala/smi.scala b/junctions/src/main/scala/smi.scala index f3495ccc..2b653bb3 100644 --- a/junctions/src/main/scala/smi.scala +++ b/junctions/src/main/scala/smi.scala @@ -121,10 +121,10 @@ class SMIIONASTIReadIOConverter(val dataWidth: Int, val addrWidth: Int) io.smi.resp.ready := (state === s_read) io.r.valid := (state === s_resp) - io.r.bits.resp := Bits(0) - io.r.bits.data := buffer.toBits - io.r.bits.id := id - io.r.bits.last := (nBeats === UInt(0)) + io.r.bits := NASTIReadDataChannel( + id = id, + data = buffer.toBits, + last = 
(nBeats === UInt(0))) when (io.ar.fire()) { when (io.ar.bits.size < UInt(byteOffBits)) { @@ -207,8 +207,7 @@ class SMIIONASTIWriteIOConverter(val dataWidth: Int, val addrWidth: Int) io.smi.req.bits.data := data(dataWidth - 1, 0) io.smi.resp.ready := (state === s_ack) io.b.valid := (state === s_resp) - io.b.bits.resp := Bits(0) - io.b.bits.id := id + io.b.bits := NASTIWriteResponseChannel(id) val jump = PriorityMux(strb(maxWordsPerBeat - 1, 1), (1 until maxWordsPerBeat).map(UInt(_))) From 6387d31c624b5b8b079baf47e6bc03bd6646e666 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Thu, 10 Sep 2015 17:33:48 -0700 Subject: [PATCH 015/116] add comments and small fixes for NASTI and SMI --- junctions/src/main/scala/nasti.scala | 12 +++++++++++- junctions/src/main/scala/smi.scala | 13 ++++++++++--- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 26d198b9..b2282399 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -226,6 +226,7 @@ class MemIONASTISlaveIOConverter(cacheBlockOffsetBits: Int) extends MIFModule wi io.mem.resp.ready := io.nasti.r.ready } +/** Arbitrate among arbN masters requesting to a single slave */ class NASTIArbiter(val arbN: Int) extends NASTIModule { val io = new Bundle { val master = Vec.fill(arbN) { new NASTISlaveIO } @@ -292,7 +293,8 @@ class NASTIArbiter(val arbN: Int) extends NASTIModule { } else { io.slave <> io.master.head } } -// TODO: More efficient implementation a/la Chisel Stdlib +/** Locking RR arbiter for NASTI read data channel + * Arbiter locks until last message in channel is sent */ class NASTIReadDataArbiter(arbN: Int) extends NASTIModule { val io = new Bundle { val in = Vec.fill(arbN) { Decoupled(new NASTIReadDataChannel) }.flip @@ -363,6 +365,9 @@ class NASTIErrorSlave extends NASTIModule { b_queue.io.deq.ready := io.b.ready && !draining } +/** Take a single NASTI master and route its requests to 
various slaves + * @param addrmap a sequence of base address + memory size pairs, + * on for each slave interface */ class NASTIRouter(addrmap: Seq[(BigInt, BigInt)]) extends NASTIModule { val nSlaves = addrmap.size @@ -437,6 +442,11 @@ class NASTIRouter(addrmap: Seq[(BigInt, BigInt)]) extends NASTIModule { io.master.r <> r_arb.io.out } +/** Crossbar between multiple NASTI masters and slaves + * @param nMasters the number of NASTI masters + * @param nSlaves the number of NASTI slaves + * @param addrmap a sequence of base - size pairs; + * size of addrmap should be nSlaves */ class NASTICrossbar(nMasters: Int, nSlaves: Int, addrmap: Seq[(BigInt, BigInt)]) extends NASTIModule { val io = new Bundle { diff --git a/junctions/src/main/scala/smi.scala b/junctions/src/main/scala/smi.scala index 2b653bb3..e1955ab1 100644 --- a/junctions/src/main/scala/smi.scala +++ b/junctions/src/main/scala/smi.scala @@ -11,6 +11,9 @@ class SMIReq(val dataWidth: Int, val addrWidth: Int) extends Bundle { new SMIReq(dataWidth, addrWidth).asInstanceOf[this.type] } +/** Simple Memory Interface IO. 
Used to communicate with PCR and SCR + * @param dataWidth the width in bits of the data field + * @param addrWidth the width in bits of the addr field */ class SMIIO(val dataWidth: Int, val addrWidth: Int) extends Bundle { val req = Decoupled(new SMIReq(dataWidth, addrWidth)) val resp = Decoupled(Bits(width = dataWidth)).flip @@ -26,6 +29,7 @@ abstract class SMIPeripheral extends Module { lazy val io = new SMIIO(dataWidth, addrWidth).flip } +/** A simple sequential memory accessed through SMI */ class SMIMem(val dataWidth: Int, val memDepth: Int) extends SMIPeripheral { // override val addrWidth = log2Up(memDepth) @@ -35,9 +39,6 @@ class SMIMem(val dataWidth: Int, val memDepth: Int) extends SMIPeripheral { val ren = io.req.fire() && !io.req.bits.rw val wen = io.req.fire() && io.req.bits.rw - io.resp.valid := Reg(next = ren) - io.resp.bits := mem.read(io.req.bits.addr, ren) - when (wen) { mem.write(io.req.bits.addr, io.req.bits.data) } val resp_valid = Reg(init = Bool(false)) @@ -46,9 +47,14 @@ class SMIMem(val dataWidth: Int, val memDepth: Int) extends SMIPeripheral { when (io.req.fire()) { resp_valid := Bool(true) } io.resp.valid := resp_valid + io.resp.bits := mem.read(io.req.bits.addr, ren) io.req.ready := !resp_valid } +/** Arbitrate among several SMI clients + * @param n the number of clients + * @param dataWidth SMI data width + * @param addrWidth SMI address width */ class SMIArbiter(val n: Int, val dataWidth: Int, val addrWidth: Int) extends Module { val io = new Bundle { @@ -243,6 +249,7 @@ class SMIIONASTIWriteIOConverter(val dataWidth: Int, val addrWidth: Int) when (io.b.fire()) { state := s_idle } } +/** Convert NASTI protocol to SMI protocol */ class SMIIONASTISlaveIOConverter(val dataWidth: Int, val addrWidth: Int) extends NASTIModule { val io = new Bundle { From 4c3c3c630ea6e82c8a68847ae5c6153138538dc4 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Thu, 10 Sep 2015 17:55:10 -0700 Subject: [PATCH 016/116] add assertions to make sure NASTI -> MemIO 
converter takes in requests of the right size and len --- junctions/src/main/scala/nasti.scala | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index b2282399..7d31a882 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -191,6 +191,15 @@ class MemIONASTISlaveIOConverter(cacheBlockOffsetBits: Int) extends MIFModule wi require(mifDataBits == nastiXDataBits, "Data sizes between LLC and MC don't agree") val (mif_cnt_out, mif_wrap_out) = Counter(io.mem.resp.fire(), mifDataBeats) + assert(!io.nasti.aw.valid || io.nasti.aw.bits.size === UInt(log2Up(mifDataBits/8)), + "NASTI data size does not match MemIO data size") + assert(!io.nasti.ar.valid || io.nasti.ar.bits.size === UInt(log2Up(mifDataBits/8)), + "NASTI data size does not match MemIO data size") + assert(!io.nasti.aw.valid || io.nasti.aw.bits.len === UInt(mifDataBeats - 1), + "NASTI length does not match number of MemIO beats") + assert(!io.nasti.ar.valid || io.nasti.ar.bits.len === UInt(mifDataBeats - 1), + "NASTI length does not match number of MemIO beats") + // according to the spec, we can't send b until the last transfer on w val b_ok = Reg(init = Bool(true)) when (io.nasti.aw.fire()) { b_ok := Bool(false) } From 4db6124b2a648881822ff0962d172390635d13eb Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Fri, 18 Sep 2015 09:42:41 -0700 Subject: [PATCH 017/116] NASTIErrorSlave should print address --- junctions/src/main/scala/nasti.scala | 3 +++ 1 file changed, 3 insertions(+) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 7d31a882..38da8378 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -348,6 +348,9 @@ class NASTIReadDataArbiter(arbN: Int) extends NASTIModule { class NASTIErrorSlave extends NASTIModule { val io = new NASTISlaveIO + when (io.ar.fire()) { printf("Invalid read address %x\n", 
io.ar.bits.addr) } + when (io.aw.fire()) { printf("Invalid write address %x\n", io.aw.bits.addr) } + val r_queue = Module(new Queue(UInt(width = nastiRIdBits), 2)) r_queue.io.enq.valid := io.ar.valid r_queue.io.enq.bits := io.ar.bits.id From 27745204ebc756512028d5944fd996a86c845e70 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Tue, 22 Sep 2015 09:42:57 -0700 Subject: [PATCH 018/116] ErrorSlave returns response of correct length for reads --- junctions/src/main/scala/nasti.scala | 33 +++++++++++++++++++++------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 38da8378..37388403 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -351,15 +351,32 @@ class NASTIErrorSlave extends NASTIModule { when (io.ar.fire()) { printf("Invalid read address %x\n", io.ar.bits.addr) } when (io.aw.fire()) { printf("Invalid write address %x\n", io.aw.bits.addr) } - val r_queue = Module(new Queue(UInt(width = nastiRIdBits), 2)) - r_queue.io.enq.valid := io.ar.valid - r_queue.io.enq.bits := io.ar.bits.id - io.ar.ready := r_queue.io.enq.ready - io.r.valid := r_queue.io.deq.valid - io.r.bits.id := r_queue.io.deq.bits + val r_queue = Module(new Queue(new NASTIReadAddressChannel, 2)) + r_queue.io.enq <> io.ar + + val responding = Reg(init = Bool(false)) + val beats_left = Reg(init = UInt(0, nastiXLenBits)) + + when (!responding && r_queue.io.deq.valid) { + responding := Bool(true) + beats_left := r_queue.io.deq.bits.len + } + + io.r.valid := r_queue.io.deq.valid && responding + io.r.bits.id := r_queue.io.deq.bits.id + io.r.bits.data := UInt(0) io.r.bits.resp := Bits("b11") - io.r.bits.last := Bool(true) - r_queue.io.deq.ready := io.r.ready + io.r.bits.last := beats_left === UInt(0) + + r_queue.io.deq.ready := io.r.fire() && io.r.bits.last + + when (io.r.fire()) { + when (beats_left === UInt(0)) { + responding := Bool(false) + } .otherwise { + beats_left 
:= beats_left - UInt(0) + } + } val draining = Reg(init = Bool(false)) io.w.ready := draining From 64ab45e2e422f6fde5eb8ac862e8fa8c2a50f16c Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Tue, 22 Sep 2015 09:43:22 -0700 Subject: [PATCH 019/116] add RWX permission bits to address map --- junctions/src/main/scala/nasti.scala | 68 +++++++++++++++++++--------- 1 file changed, 47 insertions(+), 21 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 37388403..81a9be28 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -499,45 +499,58 @@ class NASTICrossbar(nMasters: Int, nSlaves: Int, addrmap: Seq[(BigInt, BigInt)]) case object NASTINMasters extends Field[Int] case object NASTINSlaves extends Field[Int] -object AddrMapTypes { +object AddrMap { type AddrMapEntry = (String, Option[BigInt], MemRegion) - type AddrMap = Seq[AddrMapEntry] + type AddrMapSeq = Seq[AddrMapEntry] + + val R = 0x4 + val W = 0x2 + val X = 0x1 + val RW = R | W + val RX = R | X + val RWX = R | W | X } -import AddrMapTypes._ +import AddrMap._ abstract class MemRegion { def size: BigInt } -case class MemSize(size: BigInt) extends MemRegion -case class MemSubmap(size: BigInt, entries: AddrMap) extends MemRegion +case class MemSize(size: BigInt, prot: Int) extends MemRegion +case class MemSubmap(size: BigInt, entries: AddrMapSeq) extends MemRegion object Submap { def apply(size: BigInt, entries: AddrMapEntry*) = new MemSubmap(size, entries) } -case class AddrHashMapEntry(port: Int, start: BigInt, size: BigInt) +case class AddrHashMapEntry(port: Int, start: BigInt, size: BigInt, prot: Int) -class AddrHashMap(addrmap: AddrMap) { +class AddrMapProt extends Bundle { + val r = Bool() + val w = Bool() + val x = Bool() +} + +class AddrHashMap(addrmap: AddrMapSeq) { val mapping = new HashMap[String, AddrHashMapEntry] - private def genPairs(addrmap: AddrMap): Seq[(String, AddrHashMapEntry)] = { + private def 
genPairs(addrmap: AddrMapSeq): Seq[(String, AddrHashMapEntry)] = { var ind = 0 var base = BigInt(0) var pairs = Seq[(String, AddrHashMapEntry)]() addrmap.foreach { case (name, startOpt, region) => region match { - case MemSize(size) => { + case MemSize(size, prot) => { if (!startOpt.isEmpty) base = startOpt.get - pairs = (name, AddrHashMapEntry(ind, base, size)) +: pairs + pairs = (name, AddrHashMapEntry(ind, base, size, prot)) +: pairs base += size ind += 1 } case MemSubmap(size, submap) => { if (!startOpt.isEmpty) base = startOpt.get val subpairs = genPairs(submap).map { - case (subname, AddrHashMapEntry(subind, subbase, subsize)) => + case (subname, AddrHashMapEntry(subind, subbase, subsize, prot)) => (name + ":" + subname, - AddrHashMapEntry(ind + subind, base + subbase, subsize)) + AddrHashMapEntry(ind + subind, base + subbase, subsize, prot)) } pairs = subpairs ++ pairs ind += subpairs.size @@ -553,16 +566,29 @@ class AddrHashMap(addrmap: AddrMap) { def nEntries: Int = mapping.size def apply(name: String): AddrHashMapEntry = mapping(name) def get(name: String): Option[AddrHashMapEntry] = mapping.get(name) - def sortedEntries(): Seq[(String, BigInt, BigInt)] = { - val arr = new Array[(String, BigInt, BigInt)](mapping.size) - mapping.foreach { case (name, AddrHashMapEntry(port, base, size)) => - arr(port) = (name, base, size) + def sortedEntries(): Seq[(String, BigInt, BigInt, Int)] = { + val arr = new Array[(String, BigInt, BigInt, Int)](mapping.size) + mapping.foreach { case (name, AddrHashMapEntry(port, base, size, prot)) => + arr(port) = (name, base, size, prot) } arr.toSeq } + + def isValid(addr: UInt): Bool = { + sortedEntries().map { case (_, base, size, _) => + addr >= UInt(base) && addr < UInt(base + size) + }.reduceLeft(_ || _) + } + + def getProt(addr: UInt): AddrMapProt = { + Mux1H(sortedEntries().map { case (_, base, size, prot) => + (addr >= UInt(base) && addr < UInt(base + size), + new AddrMapProt().fromBits(Bits(prot, 3))) + }) + } } -case 
object NASTIAddrMap extends Field[AddrMap] +case object NASTIAddrMap extends Field[AddrMapSeq] case object NASTIAddrHashMap extends Field[AddrHashMap] class NASTIInterconnectIO(val nMasters: Int, val nSlaves: Int) extends Bundle { @@ -583,11 +609,11 @@ abstract class NASTIInterconnect extends NASTIModule { class NASTIRecursiveInterconnect( val nMasters: Int, val nSlaves: Int, - addrmap: AddrMap, base: BigInt = 0) extends NASTIInterconnect { + addrmap: AddrMapSeq, base: BigInt = 0) extends NASTIInterconnect { - private def mapCountSlaves(addrmap: AddrMap): Int = { + private def mapCountSlaves(addrmap: AddrMapSeq): Int = { addrmap.map { - case (_, _, MemSize(_)) => 1 + case (_, _, MemSize(_, _)) => 1 case (_, _, MemSubmap(_, submap)) => mapCountSlaves(submap) }.reduceLeft(_ + _) } @@ -618,7 +644,7 @@ class NASTIRecursiveInterconnect( addrmap.zip(realAddrMap).zipWithIndex.foreach { case (((_, _, region), (start, size)), i) => { region match { - case MemSize(_) => + case MemSize(_, _) => io.slaves(slaveInd) <> flatSlaves(i) slaveInd += 1 case MemSubmap(_, submap) => From ee65f6a84d62ed06f8c5dddbcc3e2c8f67c608d0 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Tue, 22 Sep 2015 10:30:09 -0700 Subject: [PATCH 020/116] get rid of Vec.fill in IOs --- junctions/src/main/scala/nasti.scala | 14 +++++++------- junctions/src/main/scala/smi.scala | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 81a9be28..b082fcd4 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -238,7 +238,7 @@ class MemIONASTISlaveIOConverter(cacheBlockOffsetBits: Int) extends MIFModule wi /** Arbitrate among arbN masters requesting to a single slave */ class NASTIArbiter(val arbN: Int) extends NASTIModule { val io = new Bundle { - val master = Vec.fill(arbN) { new NASTISlaveIO } + val master = Vec(new NASTISlaveIO, arbN) val slave = new NASTIMasterIO } @@ -306,7 
+306,7 @@ class NASTIArbiter(val arbN: Int) extends NASTIModule { * Arbiter locks until last message in channel is sent */ class NASTIReadDataArbiter(arbN: Int) extends NASTIModule { val io = new Bundle { - val in = Vec.fill(arbN) { Decoupled(new NASTIReadDataChannel) }.flip + val in = Vec(Decoupled(new NASTIReadDataChannel), arbN).flip val out = Decoupled(new NASTIReadDataChannel) } @@ -402,7 +402,7 @@ class NASTIRouter(addrmap: Seq[(BigInt, BigInt)]) extends NASTIModule { val io = new Bundle { val master = new NASTISlaveIO - val slave = Vec.fill(nSlaves) { new NASTIMasterIO } + val slave = Vec(new NASTIMasterIO, nSlaves) } var ar_ready = Bool(false) @@ -479,8 +479,8 @@ class NASTIRouter(addrmap: Seq[(BigInt, BigInt)]) extends NASTIModule { class NASTICrossbar(nMasters: Int, nSlaves: Int, addrmap: Seq[(BigInt, BigInt)]) extends NASTIModule { val io = new Bundle { - val masters = Vec.fill(nMasters) { new NASTISlaveIO } - val slaves = Vec.fill(nSlaves) { new NASTIMasterIO } + val masters = Vec(new NASTISlaveIO, nMasters) + val slaves = Vec(new NASTIMasterIO, nSlaves) } val routers = Vec.fill(nMasters) { Module(new NASTIRouter(addrmap)).io } @@ -594,8 +594,8 @@ case object NASTIAddrHashMap extends Field[AddrHashMap] class NASTIInterconnectIO(val nMasters: Int, val nSlaves: Int) extends Bundle { /* This is a bit confusing. The interconnect is a slave to the masters and * a master to the slaves. Hence why the declarations seem to be backwards. 
*/ - val masters = Vec.fill(nMasters) { new NASTISlaveIO } - val slaves = Vec.fill(nSlaves) { new NASTIMasterIO } + val masters = Vec(new NASTISlaveIO, nMasters) + val slaves = Vec(new NASTIMasterIO, nSlaves) override def cloneType = new NASTIInterconnectIO(nMasters, nSlaves).asInstanceOf[this.type] } diff --git a/junctions/src/main/scala/smi.scala b/junctions/src/main/scala/smi.scala index e1955ab1..2c0ecf81 100644 --- a/junctions/src/main/scala/smi.scala +++ b/junctions/src/main/scala/smi.scala @@ -58,7 +58,7 @@ class SMIMem(val dataWidth: Int, val memDepth: Int) extends SMIPeripheral { class SMIArbiter(val n: Int, val dataWidth: Int, val addrWidth: Int) extends Module { val io = new Bundle { - val in = Vec.fill(n) { new SMIIO(dataWidth, addrWidth) }.flip + val in = Vec(new SMIIO(dataWidth, addrWidth), n).flip val out = new SMIIO(dataWidth, addrWidth) } From e3d2207c7265c065053ebd714c2d18f15af8befe Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Thu, 24 Sep 2015 16:57:50 -0700 Subject: [PATCH 021/116] Chisel3 compat: merge NASTIMasterIO and NASTISlaveIO so we do not depend on flip() modifying the object --- junctions/src/main/scala/nasti.scala | 26 ++++++++++++-------------- junctions/src/main/scala/smi.scala | 4 ++-- 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index b082fcd4..f3e9f65f 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -55,7 +55,7 @@ trait NASTIChannel extends NASTIBundle trait NASTIMasterToSlaveChannel extends NASTIChannel trait NASTISlaveToMasterChannel extends NASTIChannel -class NASTIMasterIO extends Bundle { +class NASTIIO extends Bundle { val aw = Decoupled(new NASTIWriteAddressChannel) val w = Decoupled(new NASTIWriteDataChannel) val b = Decoupled(new NASTIWriteResponseChannel).flip @@ -63,8 +63,6 @@ class NASTIMasterIO extends Bundle { val r = Decoupled(new NASTIReadDataChannel).flip } -class 
NASTISlaveIO extends NASTIMasterIO { flip() } - trait HasNASTIMetadata extends NASTIBundle { val addr = UInt(width = nastiXAddrBits) val len = UInt(width = nastiXLenBits) @@ -182,9 +180,9 @@ object NASTIWriteResponseChannel { } } -class MemIONASTISlaveIOConverter(cacheBlockOffsetBits: Int) extends MIFModule with NASTIParameters { +class MemIONASTIIOConverter(cacheBlockOffsetBits: Int) extends MIFModule with NASTIParameters { val io = new Bundle { - val nasti = new NASTISlaveIO + val nasti = (new NASTIIO).flip val mem = new MemIO } @@ -238,8 +236,8 @@ class MemIONASTISlaveIOConverter(cacheBlockOffsetBits: Int) extends MIFModule wi /** Arbitrate among arbN masters requesting to a single slave */ class NASTIArbiter(val arbN: Int) extends NASTIModule { val io = new Bundle { - val master = Vec(new NASTISlaveIO, arbN) - val slave = new NASTIMasterIO + val master = Vec(new NASTIIO, arbN).flip + val slave = new NASTIIO } if (arbN > 1) { @@ -346,7 +344,7 @@ class NASTIReadDataArbiter(arbN: Int) extends NASTIModule { /** A slave that send decode error for every request it receives */ class NASTIErrorSlave extends NASTIModule { - val io = new NASTISlaveIO + val io = (new NASTIIO).flip when (io.ar.fire()) { printf("Invalid read address %x\n", io.ar.bits.addr) } when (io.aw.fire()) { printf("Invalid write address %x\n", io.aw.bits.addr) } @@ -401,8 +399,8 @@ class NASTIRouter(addrmap: Seq[(BigInt, BigInt)]) extends NASTIModule { val nSlaves = addrmap.size val io = new Bundle { - val master = new NASTISlaveIO - val slave = Vec(new NASTIMasterIO, nSlaves) + val master = (new NASTIIO).flip + val slave = Vec(new NASTIIO, nSlaves) } var ar_ready = Bool(false) @@ -479,8 +477,8 @@ class NASTIRouter(addrmap: Seq[(BigInt, BigInt)]) extends NASTIModule { class NASTICrossbar(nMasters: Int, nSlaves: Int, addrmap: Seq[(BigInt, BigInt)]) extends NASTIModule { val io = new Bundle { - val masters = Vec(new NASTISlaveIO, nMasters) - val slaves = Vec(new NASTIMasterIO, nSlaves) + val masters = 
Vec(new NASTIIO, nMasters).flip + val slaves = Vec(new NASTIIO, nSlaves) } val routers = Vec.fill(nMasters) { Module(new NASTIRouter(addrmap)).io } @@ -594,8 +592,8 @@ case object NASTIAddrHashMap extends Field[AddrHashMap] class NASTIInterconnectIO(val nMasters: Int, val nSlaves: Int) extends Bundle { /* This is a bit confusing. The interconnect is a slave to the masters and * a master to the slaves. Hence why the declarations seem to be backwards. */ - val masters = Vec(new NASTISlaveIO, nMasters) - val slaves = Vec(new NASTIMasterIO, nSlaves) + val masters = Vec(new NASTIIO, nMasters).flip + val slaves = Vec(new NASTIIO, nSlaves) override def cloneType = new NASTIInterconnectIO(nMasters, nSlaves).asInstanceOf[this.type] } diff --git a/junctions/src/main/scala/smi.scala b/junctions/src/main/scala/smi.scala index 2c0ecf81..ec2e554e 100644 --- a/junctions/src/main/scala/smi.scala +++ b/junctions/src/main/scala/smi.scala @@ -250,10 +250,10 @@ class SMIIONASTIWriteIOConverter(val dataWidth: Int, val addrWidth: Int) } /** Convert NASTI protocol to SMI protocol */ -class SMIIONASTISlaveIOConverter(val dataWidth: Int, val addrWidth: Int) +class SMIIONASTIIOConverter(val dataWidth: Int, val addrWidth: Int) extends NASTIModule { val io = new Bundle { - val nasti = new NASTISlaveIO + val nasti = (new NASTIIO).flip val smi = new SMIIO(dataWidth, addrWidth) } From 88b15dba600d0b67ab60bf2f7fb33b40c801d35b Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Thu, 24 Sep 2015 17:51:38 -0700 Subject: [PATCH 022/116] replace remaining uses of Vec.fill --- junctions/src/main/scala/nasti.scala | 4 ++-- junctions/src/main/scala/smi.scala | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index f3e9f65f..abda7321 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -481,8 +481,8 @@ class NASTICrossbar(nMasters: Int, nSlaves: Int, addrmap: Seq[(BigInt, 
BigInt)]) val slaves = Vec(new NASTIIO, nSlaves) } - val routers = Vec.fill(nMasters) { Module(new NASTIRouter(addrmap)).io } - val arbiters = Vec.fill(nSlaves) { Module(new NASTIArbiter(nMasters)).io } + val routers = Vec(nMasters, Module(new NASTIRouter(addrmap)).io) + val arbiters = Vec(nSlaves, Module(new NASTIArbiter(nMasters)).io) for (i <- 0 until nMasters) { routers(i).master <> io.masters(i) diff --git a/junctions/src/main/scala/smi.scala b/junctions/src/main/scala/smi.scala index ec2e554e..b7dc329d 100644 --- a/junctions/src/main/scala/smi.scala +++ b/junctions/src/main/scala/smi.scala @@ -116,7 +116,7 @@ class SMIIONASTIReadIOConverter(val dataWidth: Int, val addrWidth: Int) val recvInd = Reg(init = UInt(0, wordCountBits)) val sendDone = Reg(init = Bool(false)) - val buffer = Reg(init = Vec.fill(maxWordsPerBeat) { Bits(0, dataWidth) }) + val buffer = Reg(init = Vec(maxWordsPerBeat, Bits(0, dataWidth))) io.ar.ready := (state === s_idle) From 2e63fb291a61aee66118877531ffe3e949d76a92 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Fri, 25 Sep 2015 10:05:38 -0700 Subject: [PATCH 023/116] put sensible defaults for NASTI channel constructors --- junctions/src/main/scala/nasti.scala | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index abda7321..d01e352f 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -112,7 +112,7 @@ class NASTIReadDataChannel extends NASTIResponseChannel with HasNASTIData { } object NASTIWriteAddressChannel { - def apply(id: UInt, addr: UInt, size: UInt, len: UInt) = { + def apply(id: UInt, addr: UInt, size: UInt, len: UInt = UInt(0)) = { val aw = Wire(new NASTIWriteAddressChannel) aw.id := id aw.addr := addr @@ -130,7 +130,7 @@ object NASTIWriteAddressChannel { } object NASTIReadAddressChannel { - def apply(id: UInt, addr: UInt, size: UInt, len: UInt) = { + def apply(id: UInt, addr: UInt, 
size: UInt, len: UInt = UInt(0)) = { val ar = Wire(new NASTIReadAddressChannel) ar.id := id ar.addr := addr @@ -148,7 +148,9 @@ object NASTIReadAddressChannel { } object NASTIWriteDataChannel { - def apply(strb: UInt, data: UInt, last: Bool) = { + private val strobeBits = new NASTIWriteDataChannel().nastiWStrobeBits + val fullStrobe = Fill(strobeBits, UInt(1, 1)) + def apply(data: UInt, strb: UInt = fullStrobe, last: Bool = Bool(true)) = { val w = Wire(new NASTIWriteDataChannel) w.strb := strb w.data := data @@ -159,7 +161,7 @@ object NASTIWriteDataChannel { } object NASTIReadDataChannel { - def apply(id: UInt, data: UInt, last: Bool, resp: UInt = UInt(0)) = { + def apply(id: UInt, data: UInt, last: Bool = Bool(true), resp: UInt = UInt(0)) = { val r = Wire(new NASTIReadDataChannel) r.id := id r.data := data From a9c6cced2d6278032dce5edfa518e692dc4265da Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Fri, 25 Sep 2015 11:03:24 -0700 Subject: [PATCH 024/116] fix bug in NASTIArbiter --- junctions/src/main/scala/nasti.scala | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index d01e352f..2d92b49d 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -296,7 +296,9 @@ class NASTIArbiter(val arbN: Int) extends NASTIModule { io.slave.w.valid := io.master(w_chosen).w.valid && !w_done io.slave.ar <> ar_arb.io.out - io.slave.aw <> aw_arb.io.out + + io.slave.aw.bits <> aw_arb.io.out.bits + io.slave.aw.valid := aw_arb.io.out.valid && w_done aw_arb.io.out.ready := io.slave.aw.ready && w_done } else { io.slave <> io.master.head } From e75674c0cbebaee58679e0b30fb877d1b6a021c4 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Fri, 25 Sep 2015 17:05:07 -0700 Subject: [PATCH 025/116] Revert "replace remaining uses of Vec.fill" This reverts commit 16dca2186b95945ad2ba5f906113101de0726617. 
--- junctions/src/main/scala/nasti.scala | 4 ++-- junctions/src/main/scala/smi.scala | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 2d92b49d..8d76fc9c 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -485,8 +485,8 @@ class NASTICrossbar(nMasters: Int, nSlaves: Int, addrmap: Seq[(BigInt, BigInt)]) val slaves = Vec(new NASTIIO, nSlaves) } - val routers = Vec(nMasters, Module(new NASTIRouter(addrmap)).io) - val arbiters = Vec(nSlaves, Module(new NASTIArbiter(nMasters)).io) + val routers = Vec.fill(nMasters) { Module(new NASTIRouter(addrmap)).io } + val arbiters = Vec.fill(nSlaves) { Module(new NASTIArbiter(nMasters)).io } for (i <- 0 until nMasters) { routers(i).master <> io.masters(i) diff --git a/junctions/src/main/scala/smi.scala b/junctions/src/main/scala/smi.scala index b7dc329d..ec2e554e 100644 --- a/junctions/src/main/scala/smi.scala +++ b/junctions/src/main/scala/smi.scala @@ -116,7 +116,7 @@ class SMIIONASTIReadIOConverter(val dataWidth: Int, val addrWidth: Int) val recvInd = Reg(init = UInt(0, wordCountBits)) val sendDone = Reg(init = Bool(false)) - val buffer = Reg(init = Vec(maxWordsPerBeat, Bits(0, dataWidth))) + val buffer = Reg(init = Vec.fill(maxWordsPerBeat) { Bits(0, dataWidth) }) io.ar.ready := (state === s_idle) From a08872c0e97ab9306179ebbd9ae478c8d1fd0058 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Fri, 25 Sep 2015 17:05:28 -0700 Subject: [PATCH 026/116] val -> def in static object --- junctions/src/main/scala/nasti.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 8d76fc9c..26306e54 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -148,8 +148,8 @@ object NASTIReadAddressChannel { } object NASTIWriteDataChannel { - private val strobeBits 
= new NASTIWriteDataChannel().nastiWStrobeBits - val fullStrobe = Fill(strobeBits, UInt(1, 1)) + private def strobeBits = new NASTIWriteDataChannel().nastiWStrobeBits + def fullStrobe = Fill(strobeBits, UInt(1, 1)) def apply(data: UInt, strb: UInt = fullStrobe, last: Bool = Bool(true)) = { val w = Wire(new NASTIWriteDataChannel) w.strb := strb From 39a749843c9b7181b521c3d79346867e6b5c7e87 Mon Sep 17 00:00:00 2001 From: Henry Cook Date: Fri, 2 Oct 2015 14:19:51 -0700 Subject: [PATCH 027/116] refactor NASTI to not use param; new AddrMap class --- junctions/src/main/scala/nasti.scala | 302 ++++++++++++++------------- junctions/src/main/scala/smi.scala | 36 ++-- junctions/src/main/scala/util.scala | 7 + 3 files changed, 181 insertions(+), 164 deletions(-) create mode 100644 junctions/src/main/scala/util.scala diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 26306e54..413f3f5c 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -6,21 +6,20 @@ import scala.math.max import scala.collection.mutable.ArraySeq import scala.collection.mutable.HashMap +case object NastiBitWidths extends Field[NastiParameters] +case object NastiAddrMap extends Field[AddrMap] case object MMIOBase extends Field[BigInt] -case object NASTIDataBits extends Field[Int] -case object NASTIAddrBits extends Field[Int] -case object NASTIIdBits extends Field[Int] -object bigIntPow2 { - def apply(in: BigInt): Boolean = in > 0 && ((in & (in-1)) == 0) -} +case class NastiParameters(dataBits: Int, addrBits: Int, idBits: Int) -trait NASTIParameters extends UsesParameters { - val nastiXDataBits = params(NASTIDataBits) +trait HasNastiParameters { + implicit val p: Parameters + val external = p(NastiBitWidths) + val nastiXDataBits = external.dataBits val nastiWStrobeBits = nastiXDataBits / 8 - val nastiXAddrBits = params(NASTIAddrBits) - val nastiWIdBits = params(NASTIIdBits) - val nastiRIdBits = params(NASTIIdBits) + val 
nastiXAddrBits = external.addrBits + val nastiWIdBits = external.idBits + val nastiRIdBits = external.idBits val nastiXIdBits = max(nastiWIdBits, nastiRIdBits) val nastiXUserBits = 1 val nastiAWUserBits = nastiXUserBits @@ -48,22 +47,16 @@ trait NASTIParameters extends UsesParameters { UInt(128) -> UInt(7))) } -abstract class NASTIBundle extends Bundle with NASTIParameters -abstract class NASTIModule extends Module with NASTIParameters - -trait NASTIChannel extends NASTIBundle -trait NASTIMasterToSlaveChannel extends NASTIChannel -trait NASTISlaveToMasterChannel extends NASTIChannel - -class NASTIIO extends Bundle { - val aw = Decoupled(new NASTIWriteAddressChannel) - val w = Decoupled(new NASTIWriteDataChannel) - val b = Decoupled(new NASTIWriteResponseChannel).flip - val ar = Decoupled(new NASTIReadAddressChannel) - val r = Decoupled(new NASTIReadDataChannel).flip +abstract class NastiModule extends Module with HasNastiParameters +abstract class NastiBundle(implicit val p: Parameters) extends Bundle with HasNastiParameters { + override def cloneType = this.getClass.getConstructors.head.newInstance(p).asInstanceOf[this.type] } -trait HasNASTIMetadata extends NASTIBundle { +abstract class NastiChannel(implicit p: Parameters) extends NastiBundle()(p) +abstract class NastiMasterToSlaveChannel(implicit p: Parameters) extends NastiChannel()(p) +abstract class NastiSlaveToMasterChannel(implicit p: Parameters) extends NastiChannel()(p) + +trait HasNastiMetadata extends HasNastiParameters { val addr = UInt(width = nastiXAddrBits) val len = UInt(width = nastiXLenBits) val size = UInt(width = nastiXSizeBits) @@ -75,45 +68,56 @@ trait HasNASTIMetadata extends NASTIBundle { val region = UInt(width = nastiXRegionBits) } -trait HasNASTIData extends NASTIBundle { +trait HasNastiData extends HasNastiParameters { val data = UInt(width = nastiXDataBits) val last = Bool() } -class NASTIAddressChannel extends NASTIMasterToSlaveChannel with HasNASTIMetadata +class NastiIO(implicit p: 
Parameters) extends NastiBundle()(p) { + val aw = Decoupled(new NastiWriteAddressChannel) + val w = Decoupled(new NastiWriteDataChannel) + val b = Decoupled(new NastiWriteResponseChannel).flip + val ar = Decoupled(new NastiReadAddressChannel) + val r = Decoupled(new NastiReadDataChannel).flip +} -class NASTIResponseChannel extends NASTISlaveToMasterChannel { +class NastiAddressChannel(implicit p: Parameters) extends NastiMasterToSlaveChannel()(p) + with HasNastiMetadata + +class NastiResponseChannel(implicit p: Parameters) extends NastiSlaveToMasterChannel()(p) { val resp = UInt(width = nastiXRespBits) } -class NASTIWriteAddressChannel extends NASTIAddressChannel { +class NastiWriteAddressChannel(implicit p: Parameters) extends NastiAddressChannel()(p) { val id = UInt(width = nastiWIdBits) val user = UInt(width = nastiAWUserBits) } -class NASTIWriteDataChannel extends NASTIMasterToSlaveChannel with HasNASTIData { +class NastiWriteDataChannel(implicit p: Parameters) extends NastiMasterToSlaveChannel()(p) + with HasNastiData { val strb = UInt(width = nastiWStrobeBits) val user = UInt(width = nastiWUserBits) } -class NASTIWriteResponseChannel extends NASTIResponseChannel { +class NastiWriteResponseChannel(implicit p: Parameters) extends NastiResponseChannel()(p) { val id = UInt(width = nastiWIdBits) val user = UInt(width = nastiBUserBits) } -class NASTIReadAddressChannel extends NASTIAddressChannel { +class NastiReadAddressChannel(implicit p: Parameters) extends NastiAddressChannel()(p) { val id = UInt(width = nastiRIdBits) val user = UInt(width = nastiARUserBits) } -class NASTIReadDataChannel extends NASTIResponseChannel with HasNASTIData { +class NastiReadDataChannel(implicit p: Parameters) extends NastiResponseChannel()(p) + with HasNastiData { val id = UInt(width = nastiRIdBits) val user = UInt(width = nastiRUserBits) } -object NASTIWriteAddressChannel { - def apply(id: UInt, addr: UInt, size: UInt, len: UInt = UInt(0)) = { - val aw = Wire(new 
NASTIWriteAddressChannel) +object NastiWriteAddressChannel { + def apply(id: UInt, addr: UInt, size: UInt, len: UInt = UInt(0))(implicit p: Parameters) = { + val aw = Wire(new NastiWriteAddressChannel) aw.id := id aw.addr := addr aw.len := len @@ -129,9 +133,9 @@ object NASTIWriteAddressChannel { } } -object NASTIReadAddressChannel { - def apply(id: UInt, addr: UInt, size: UInt, len: UInt = UInt(0)) = { - val ar = Wire(new NASTIReadAddressChannel) +object NastiReadAddressChannel { + def apply(id: UInt, addr: UInt, size: UInt, len: UInt = UInt(0))(implicit p: Parameters) = { + val ar = Wire(new NastiReadAddressChannel) ar.id := id ar.addr := addr ar.len := len @@ -147,22 +151,27 @@ object NASTIReadAddressChannel { } } -object NASTIWriteDataChannel { - private def strobeBits = new NASTIWriteDataChannel().nastiWStrobeBits - def fullStrobe = Fill(strobeBits, UInt(1, 1)) - def apply(data: UInt, strb: UInt = fullStrobe, last: Bool = Bool(true)) = { - val w = Wire(new NASTIWriteDataChannel) - w.strb := strb +object NastiWriteDataChannel { + def apply(data: UInt, last: Bool = Bool(true))(implicit p: Parameters): NastiWriteDataChannel = { + val w = Wire(new NastiWriteDataChannel) + w.strb := Fill(w.nastiWStrobeBits, UInt(1, 1)) w.data := data w.last := last w.user := UInt(0) w } + def apply(data: UInt, strb: UInt, last: Bool) + (implicit p: Parameters): NastiWriteDataChannel = { + val w = apply(data, last) + w.strb := strb + w + } } -object NASTIReadDataChannel { - def apply(id: UInt, data: UInt, last: Bool = Bool(true), resp: UInt = UInt(0)) = { - val r = Wire(new NASTIReadDataChannel) +object NastiReadDataChannel { + def apply(id: UInt, data: UInt, last: Bool = Bool(true), resp: UInt = UInt(0))( + implicit p: Parameters) = { + val r = Wire(new NastiReadDataChannel) r.id := id r.data := data r.last := last @@ -172,9 +181,9 @@ object NASTIReadDataChannel { } } -object NASTIWriteResponseChannel { - def apply(id: UInt, resp: UInt = UInt(0)) = { - val b = Wire(new 
NASTIWriteResponseChannel) +object NastiWriteResponseChannel { + def apply(id: UInt, resp: UInt = UInt(0))(implicit p: Parameters) = { + val b = Wire(new NastiWriteResponseChannel) b.id := id b.resp := resp b.user := UInt(0) @@ -182,9 +191,10 @@ object NASTIWriteResponseChannel { } } -class MemIONASTIIOConverter(cacheBlockOffsetBits: Int) extends MIFModule with NASTIParameters { +class MemIONastiIOConverter(cacheBlockOffsetBits: Int)(implicit val p: Parameters) extends MIFModule + with HasNastiParameters { val io = new Bundle { - val nasti = (new NASTIIO).flip + val nasti = (new NastiIO).flip val mem = new MemIO } @@ -192,13 +202,13 @@ class MemIONASTIIOConverter(cacheBlockOffsetBits: Int) extends MIFModule with NA val (mif_cnt_out, mif_wrap_out) = Counter(io.mem.resp.fire(), mifDataBeats) assert(!io.nasti.aw.valid || io.nasti.aw.bits.size === UInt(log2Up(mifDataBits/8)), - "NASTI data size does not match MemIO data size") + "Nasti data size does not match MemIO data size") assert(!io.nasti.ar.valid || io.nasti.ar.bits.size === UInt(log2Up(mifDataBits/8)), - "NASTI data size does not match MemIO data size") + "Nasti data size does not match MemIO data size") assert(!io.nasti.aw.valid || io.nasti.aw.bits.len === UInt(mifDataBeats - 1), - "NASTI length does not match number of MemIO beats") + "Nasti length does not match number of MemIO beats") assert(!io.nasti.ar.valid || io.nasti.ar.bits.len === UInt(mifDataBeats - 1), - "NASTI length does not match number of MemIO beats") + "Nasti length does not match number of MemIO beats") // according to the spec, we can't send b until the last transfer on w val b_ok = Reg(init = Bool(true)) @@ -236,17 +246,17 @@ class MemIONASTIIOConverter(cacheBlockOffsetBits: Int) extends MIFModule with NA } /** Arbitrate among arbN masters requesting to a single slave */ -class NASTIArbiter(val arbN: Int) extends NASTIModule { +class NastiArbiter(val arbN: Int)(implicit val p: Parameters) extends NastiModule { val io = new Bundle { - val 
master = Vec(new NASTIIO, arbN).flip - val slave = new NASTIIO + val master = Vec(new NastiIO, arbN).flip + val slave = new NastiIO } if (arbN > 1) { val arbIdBits = log2Up(arbN) - val ar_arb = Module(new RRArbiter(new NASTIReadAddressChannel, arbN)) - val aw_arb = Module(new RRArbiter(new NASTIWriteAddressChannel, arbN)) + val ar_arb = Module(new RRArbiter(new NastiReadAddressChannel, arbN)) + val aw_arb = Module(new RRArbiter(new NastiWriteAddressChannel, arbN)) val slave_r_arb_id = io.slave.r.bits.id(arbIdBits - 1, 0) val slave_b_arb_id = io.slave.b.bits.id(arbIdBits - 1, 0) @@ -304,12 +314,12 @@ class NASTIArbiter(val arbN: Int) extends NASTIModule { } else { io.slave <> io.master.head } } -/** Locking RR arbiter for NASTI read data channel +/** Locking RR arbiter for Nasti read data channel * Arbiter locks until last message in channel is sent */ -class NASTIReadDataArbiter(arbN: Int) extends NASTIModule { +class NastiReadDataArbiter(arbN: Int)(implicit val p: Parameters) extends NastiModule { val io = new Bundle { - val in = Vec(Decoupled(new NASTIReadDataChannel), arbN).flip - val out = Decoupled(new NASTIReadDataChannel) + val in = Vec(Decoupled(new NastiReadDataChannel), arbN).flip + val out = Decoupled(new NastiReadDataChannel) } def rotateLeft[T <: Data](norm: Vec[T], rot: UInt): Vec[T] = { @@ -347,13 +357,13 @@ class NASTIReadDataArbiter(arbN: Int) extends NASTIModule { } /** A slave that send decode error for every request it receives */ -class NASTIErrorSlave extends NASTIModule { - val io = (new NASTIIO).flip +class NastiErrorSlave(implicit val p: Parameters) extends NastiModule { + val io = (new NastiIO).flip when (io.ar.fire()) { printf("Invalid read address %x\n", io.ar.bits.addr) } when (io.aw.fire()) { printf("Invalid write address %x\n", io.aw.bits.addr) } - val r_queue = Module(new Queue(new NASTIReadAddressChannel, 2)) + val r_queue = Module(new Queue(new NastiReadAddressChannel, 2)) r_queue.io.enq <> io.ar val responding = Reg(init = 
Bool(false)) @@ -396,15 +406,15 @@ class NASTIErrorSlave extends NASTIModule { b_queue.io.deq.ready := io.b.ready && !draining } -/** Take a single NASTI master and route its requests to various slaves +/** Take a single Nasti master and route its requests to various slaves * @param addrmap a sequence of base address + memory size pairs, * on for each slave interface */ -class NASTIRouter(addrmap: Seq[(BigInt, BigInt)]) extends NASTIModule { +class NastiRouter(addrmap: Seq[(BigInt, BigInt)])(implicit val p: Parameters) extends NastiModule { val nSlaves = addrmap.size val io = new Bundle { - val master = (new NASTIIO).flip - val slave = Vec(new NASTIIO, nSlaves) + val master = (new NastiIO).flip + val slave = Vec(new NastiIO, nSlaves) } var ar_ready = Bool(false) @@ -446,7 +456,7 @@ class NASTIRouter(addrmap: Seq[(BigInt, BigInt)]) extends NASTIModule { w_ready = w_ready || (s.w.ready && chosen) } - val err_slave = Module(new NASTIErrorSlave) + val err_slave = Module(new NastiErrorSlave) err_slave.io.ar.valid := !r_valid_addr && io.master.ar.valid err_slave.io.ar.bits := io.master.ar.bits err_slave.io.aw.valid := !w_valid_addr && io.master.aw.valid @@ -458,8 +468,8 @@ class NASTIRouter(addrmap: Seq[(BigInt, BigInt)]) extends NASTIModule { io.master.aw.ready := aw_ready || (!w_valid_addr && err_slave.io.aw.ready) io.master.w.ready := w_ready || err_slave.io.w.ready - val b_arb = Module(new RRArbiter(new NASTIWriteResponseChannel, nSlaves + 1)) - val r_arb = Module(new NASTIReadDataArbiter(nSlaves + 1)) + val b_arb = Module(new RRArbiter(new NastiWriteResponseChannel, nSlaves + 1)) + val r_arb = Module(new NastiReadDataArbiter(nSlaves + 1)) for (i <- 0 until nSlaves) { b_arb.io.in(i) <> io.slave(i).b @@ -473,20 +483,20 @@ class NASTIRouter(addrmap: Seq[(BigInt, BigInt)]) extends NASTIModule { io.master.r <> r_arb.io.out } -/** Crossbar between multiple NASTI masters and slaves - * @param nMasters the number of NASTI masters - * @param nSlaves the number of NASTI 
slaves +/** Crossbar between multiple Nasti masters and slaves + * @param nMasters the number of Nasti masters + * @param nSlaves the number of Nasti slaves * @param addrmap a sequence of base - size pairs; * size of addrmap should be nSlaves */ -class NASTICrossbar(nMasters: Int, nSlaves: Int, addrmap: Seq[(BigInt, BigInt)]) - extends NASTIModule { +class NastiCrossbar(nMasters: Int, nSlaves: Int, addrmap: Seq[(BigInt, BigInt)]) + (implicit val p: Parameters) extends NastiModule { val io = new Bundle { - val masters = Vec(new NASTIIO, nMasters).flip - val slaves = Vec(new NASTIIO, nSlaves) + val masters = Vec(new NastiIO, nMasters).flip + val slaves = Vec(new NastiIO, nSlaves) } - val routers = Vec.fill(nMasters) { Module(new NASTIRouter(addrmap)).io } - val arbiters = Vec.fill(nSlaves) { Module(new NASTIArbiter(nMasters)).io } + val routers = Vec.fill(nMasters) { Module(new NastiRouter(addrmap)).io } + val arbiters = Vec.fill(nSlaves) { Module(new NastiArbiter(nMasters)).io } for (i <- 0 until nMasters) { routers(i).master <> io.masters(i) @@ -498,13 +508,7 @@ class NASTICrossbar(nMasters: Int, nSlaves: Int, addrmap: Seq[(BigInt, BigInt)]) } } -case object NASTINMasters extends Field[Int] -case object NASTINSlaves extends Field[Int] - -object AddrMap { - type AddrMapEntry = (String, Option[BigInt], MemRegion) - type AddrMapSeq = Seq[AddrMapEntry] - +object AddrMapConsts { val R = 0x4 val W = 0x2 val X = 0x1 @@ -512,19 +516,6 @@ object AddrMap { val RX = R | X val RWX = R | W | X } -import AddrMap._ - -abstract class MemRegion { def size: BigInt } - -case class MemSize(size: BigInt, prot: Int) extends MemRegion -case class MemSubmap(size: BigInt, entries: AddrMapSeq) extends MemRegion - -object Submap { - def apply(size: BigInt, entries: AddrMapEntry*) = - new MemSubmap(size, entries) -} - -case class AddrHashMapEntry(port: Int, start: BigInt, size: BigInt, prot: Int) class AddrMapProt extends Bundle { val r = Bool() @@ -532,14 +523,47 @@ class AddrMapProt extends 
Bundle { val x = Bool() } -class AddrHashMap(addrmap: AddrMapSeq) { +abstract class MemRegion { def size: BigInt } + +case class MemSize(size: BigInt, prot: Int) extends MemRegion + +case class MemSubmap(size: BigInt, entries: AddrMap) extends MemRegion + +//object Submap { +// def apply(size: BigInt, entries: AddrMapEntry*) = +// new MemSubmap(size, entries) +//} + +case class AddrMapEntry(name: String, start: Option[BigInt], region: MemRegion) + +case class AddrHashMapEntry(port: Int, start: BigInt, size: BigInt, prot: Int) + +class AddrMap(entries: Seq[AddrMapEntry]) extends scala.collection.IndexedSeq[AddrMapEntry] { + + def apply(index: Int): AddrMapEntry = entries(index) + + def length: Int = entries.size + + def countSlaves: Int = { + this map { entry: AddrMapEntry => entry.region match { + case MemSize(_, _) => 1 + case MemSubmap(_, submap) => submap.countSlaves + }} reduceLeft(_ + _) + } +} + +object AddrMap { + def apply(elems: AddrMapEntry*): AddrMap = new AddrMap(elems) +} + +class AddrHashMap(addrmap: AddrMap) { val mapping = new HashMap[String, AddrHashMapEntry] - private def genPairs(addrmap: AddrMapSeq): Seq[(String, AddrHashMapEntry)] = { + private def genPairs(am: AddrMap): Seq[(String, AddrHashMapEntry)] = { var ind = 0 var base = BigInt(0) var pairs = Seq[(String, AddrHashMapEntry)]() - addrmap.foreach { case (name, startOpt, region) => + am.foreach { case AddrMapEntry(name, startOpt, region) => region match { case MemSize(size, prot) => { if (!startOpt.isEmpty) base = startOpt.get @@ -590,43 +614,35 @@ class AddrHashMap(addrmap: AddrMapSeq) { } } -case object NASTIAddrMap extends Field[AddrMapSeq] -case object NASTIAddrHashMap extends Field[AddrHashMap] - -class NASTIInterconnectIO(val nMasters: Int, val nSlaves: Int) extends Bundle { +class NastiInterconnectIO(val nMasters: Int, val nSlaves: Int) + (implicit p: Parameters) extends Bundle { /* This is a bit confusing. The interconnect is a slave to the masters and * a master to the slaves. 
Hence why the declarations seem to be backwards. */ - val masters = Vec(new NASTIIO, nMasters).flip - val slaves = Vec(new NASTIIO, nSlaves) + val masters = Vec(new NastiIO, nMasters).flip + val slaves = Vec(new NastiIO, nSlaves) override def cloneType = - new NASTIInterconnectIO(nMasters, nSlaves).asInstanceOf[this.type] + new NastiInterconnectIO(nMasters, nSlaves).asInstanceOf[this.type] } -abstract class NASTIInterconnect extends NASTIModule { +abstract class NastiInterconnect extends NastiModule { val nMasters: Int val nSlaves: Int - lazy val io = new NASTIInterconnectIO(nMasters, nSlaves) + lazy val io = new NastiInterconnectIO(nMasters, nSlaves) } -class NASTIRecursiveInterconnect( - val nMasters: Int, val nSlaves: Int, - addrmap: AddrMapSeq, base: BigInt = 0) extends NASTIInterconnect { - - private def mapCountSlaves(addrmap: AddrMapSeq): Int = { - addrmap.map { - case (_, _, MemSize(_, _)) => 1 - case (_, _, MemSubmap(_, submap)) => mapCountSlaves(submap) - }.reduceLeft(_ + _) - } - +class NastiRecursiveInterconnect( + val nMasters: Int, + val nSlaves: Int, + addrmap: AddrMap, + base: BigInt = 0) + (implicit val p: Parameters) extends NastiInterconnect { var lastEnd = base var slaveInd = 0 val levelSize = addrmap.size - val realAddrMap = new ArraySeq[(BigInt, BigInt)](addrmap.size) - addrmap.zipWithIndex.foreach { case ((_, startOpt, region), i) => + addrmap.zipWithIndex.foreach { case (AddrMapEntry(_, startOpt, region), i) => val start = startOpt.getOrElse(lastEnd) val size = region.size realAddrMap(i) = (start, size) @@ -634,25 +650,24 @@ class NASTIRecursiveInterconnect( } val flatSlaves = if (nMasters > 1) { - val xbar = Module(new NASTICrossbar(nMasters, levelSize, realAddrMap)) + val xbar = Module(new NastiCrossbar(nMasters, levelSize, realAddrMap)) xbar.io.masters <> io.masters xbar.io.slaves } else { - val router = Module(new NASTIRouter(realAddrMap)) + val router = Module(new NastiRouter(realAddrMap)) router.io.master <> io.masters.head 
router.io.slave } addrmap.zip(realAddrMap).zipWithIndex.foreach { - case (((_, _, region), (start, size)), i) => { - region match { + case ((entry, (start, size)), i) => { + entry.region match { case MemSize(_, _) => io.slaves(slaveInd) <> flatSlaves(i) slaveInd += 1 case MemSubmap(_, submap) => - val subSlaves = mapCountSlaves(submap) - val ic = Module(new NASTIRecursiveInterconnect( - 1, subSlaves, submap, start)) + val subSlaves = submap.countSlaves + val ic = Module(new NastiRecursiveInterconnect(1, subSlaves, submap, start)) ic.io.masters.head <> flatSlaves(i) io.slaves.drop(slaveInd).take(subSlaves).zip(ic.io.slaves).foreach { case (s, m) => s <> m @@ -663,20 +678,15 @@ class NASTIRecursiveInterconnect( } } -class NASTITopInterconnect extends NASTIInterconnect { - val nMasters = params(NASTINMasters) - val nSlaves = params(NASTINSlaves) - - bigIntPow2(params(MMIOBase)) - - val temp = Module(new NASTIRecursiveInterconnect( - nMasters, nSlaves, params(NASTIAddrMap))) +class NastiTopInterconnect(val nMasters: Int, val nSlaves: Int) + (implicit val p: Parameters) extends NastiInterconnect { + val temp = Module(new NastiRecursiveInterconnect(nMasters, nSlaves, p(NastiAddrMap))) temp.io.masters.zip(io.masters).foreach { case (t, i) => t.ar <> i.ar t.aw <> i.aw // this queue is necessary to break up the aw - w dependence - // introduced by the TileLink -> NASTI converter + // introduced by the TileLink -> Nasti converter t.w <> Queue(i.w) i.b <> t.b i.r <> t.r diff --git a/junctions/src/main/scala/smi.scala b/junctions/src/main/scala/smi.scala index ec2e554e..52d98779 100644 --- a/junctions/src/main/scala/smi.scala +++ b/junctions/src/main/scala/smi.scala @@ -87,11 +87,11 @@ class SMIArbiter(val n: Int, val dataWidth: Int, val addrWidth: Int) io.out.resp.ready := io.in(choice).resp.ready } -class SMIIONASTIReadIOConverter(val dataWidth: Int, val addrWidth: Int) - extends NASTIModule { +class SMIIONastiReadIOConverter(val dataWidth: Int, val addrWidth: Int) + 
(implicit val p: Parameters) extends NastiModule { val io = new Bundle { - val ar = Decoupled(new NASTIReadAddressChannel).flip - val r = Decoupled(new NASTIReadDataChannel) + val ar = Decoupled(new NastiReadAddressChannel).flip + val r = Decoupled(new NastiReadDataChannel) val smi = new SMIIO(dataWidth, addrWidth) } @@ -127,7 +127,7 @@ class SMIIONASTIReadIOConverter(val dataWidth: Int, val addrWidth: Int) io.smi.resp.ready := (state === s_read) io.r.valid := (state === s_resp) - io.r.bits := NASTIReadDataChannel( + io.r.bits := NastiReadDataChannel( id = id, data = buffer.toBits, last = (nBeats === UInt(0))) @@ -169,12 +169,12 @@ class SMIIONASTIReadIOConverter(val dataWidth: Int, val addrWidth: Int) } } -class SMIIONASTIWriteIOConverter(val dataWidth: Int, val addrWidth: Int) - extends NASTIModule { +class SMIIONastiWriteIOConverter(val dataWidth: Int, val addrWidth: Int) + (implicit val p: Parameters) extends NastiModule { val io = new Bundle { - val aw = Decoupled(new NASTIWriteAddressChannel).flip - val w = Decoupled(new NASTIWriteDataChannel).flip - val b = Decoupled(new NASTIWriteResponseChannel) + val aw = Decoupled(new NastiWriteAddressChannel).flip + val w = Decoupled(new NastiWriteDataChannel).flip + val b = Decoupled(new NastiWriteResponseChannel) val smi = new SMIIO(dataWidth, addrWidth) } @@ -184,7 +184,7 @@ class SMIIONASTIWriteIOConverter(val dataWidth: Int, val addrWidth: Int) private val addrOffBits = addrWidth + byteOffBits assert(!io.aw.valid || io.aw.bits.size >= UInt(byteOffBits), - "NASTI size must be >= SMI size") + "Nasti size must be >= SMI size") val id = Reg(UInt(width = nastiWIdBits)) val addr = Reg(UInt(width = addrWidth)) @@ -213,7 +213,7 @@ class SMIIONASTIWriteIOConverter(val dataWidth: Int, val addrWidth: Int) io.smi.req.bits.data := data(dataWidth - 1, 0) io.smi.resp.ready := (state === s_ack) io.b.valid := (state === s_resp) - io.b.bits := NASTIWriteResponseChannel(id) + io.b.bits := NastiWriteResponseChannel(id) val jump = 
PriorityMux(strb(maxWordsPerBeat - 1, 1), (1 until maxWordsPerBeat).map(UInt(_))) @@ -249,21 +249,21 @@ class SMIIONASTIWriteIOConverter(val dataWidth: Int, val addrWidth: Int) when (io.b.fire()) { state := s_idle } } -/** Convert NASTI protocol to SMI protocol */ -class SMIIONASTIIOConverter(val dataWidth: Int, val addrWidth: Int) - extends NASTIModule { +/** Convert Nasti protocol to SMI protocol */ +class SMIIONastiIOConverter(val dataWidth: Int, val addrWidth: Int) + (implicit val p: Parameters) extends NastiModule { val io = new Bundle { - val nasti = (new NASTIIO).flip + val nasti = (new NastiIO).flip val smi = new SMIIO(dataWidth, addrWidth) } require(isPow2(dataWidth), "SMI data width must be power of 2") - val reader = Module(new SMIIONASTIReadIOConverter(dataWidth, addrWidth)) + val reader = Module(new SMIIONastiReadIOConverter(dataWidth, addrWidth)) reader.io.ar <> io.nasti.ar io.nasti.r <> reader.io.r - val writer = Module(new SMIIONASTIWriteIOConverter(dataWidth, addrWidth)) + val writer = Module(new SMIIONastiWriteIOConverter(dataWidth, addrWidth)) writer.io.aw <> io.nasti.aw writer.io.w <> io.nasti.w io.nasti.b <> writer.io.b diff --git a/junctions/src/main/scala/util.scala b/junctions/src/main/scala/util.scala new file mode 100644 index 00000000..1aebcad9 --- /dev/null +++ b/junctions/src/main/scala/util.scala @@ -0,0 +1,7 @@ +/// See LICENSE for license details. 
+package junctions +import Chisel._ + +object bigIntPow2 { + def apply(in: BigInt): Boolean = in > 0 && ((in & (in-1)) == 0) +} From 970445a26a258b50291219ca895ebc06ec227637 Mon Sep 17 00:00:00 2001 From: Henry Cook Date: Fri, 2 Oct 2015 15:37:41 -0700 Subject: [PATCH 028/116] refactor MemIO to not use params --- junctions/src/main/scala/memserdes.scala | 101 ++++++----------------- junctions/src/main/scala/nasti.scala | 7 +- junctions/src/main/scala/util.scala | 53 ++++++++++++ 3 files changed, 82 insertions(+), 79 deletions(-) diff --git a/junctions/src/main/scala/memserdes.scala b/junctions/src/main/scala/memserdes.scala index 78212952..1e15a78c 100644 --- a/junctions/src/main/scala/memserdes.scala +++ b/junctions/src/main/scala/memserdes.scala @@ -18,54 +18,57 @@ case object MIFDataBits extends Field[Int] case object MIFTagBits extends Field[Int] case object MIFDataBeats extends Field[Int] -trait MIFParameters extends UsesParameters { - val mifTagBits = params(MIFTagBits) - val mifAddrBits = params(MIFAddrBits) - val mifDataBits = params(MIFDataBits) - val mifDataBeats = params(MIFDataBeats) +trait HasMIFParameters { + implicit val p: Parameters + val mifTagBits = p(MIFTagBits) + val mifAddrBits = p(MIFAddrBits) + val mifDataBits = p(MIFDataBits) + val mifDataBeats = p(MIFDataBeats) } -abstract class MIFBundle extends Bundle with MIFParameters -abstract class MIFModule extends Module with MIFParameters +abstract class MIFModule extends Module with HasMIFParameters +abstract class MIFBundle(implicit p: Parameters) extends ParameterizedBundle()(p) + with HasMIFParameters -trait HasMemData extends MIFBundle { +trait HasMemData extends HasMIFParameters { val data = Bits(width = mifDataBits) } -trait HasMemAddr extends MIFBundle { +trait HasMemAddr extends HasMIFParameters { val addr = UInt(width = mifAddrBits) } -trait HasMemTag extends MIFBundle { +trait HasMemTag extends HasMIFParameters { val tag = UInt(width = mifTagBits) } -class MemReqCmd extends HasMemAddr 
with HasMemTag { +class MemReqCmd(implicit p: Parameters) extends MIFBundle()(p) with HasMemAddr with HasMemTag { val rw = Bool() } -class MemTag extends HasMemTag -class MemData extends HasMemData -class MemResp extends HasMemData with HasMemTag +class MemTag(implicit p: Parameters) extends ParameterizedBundle()(p) with HasMemTag +class MemData(implicit p: Parameters) extends ParameterizedBundle()(p) with HasMemData +class MemResp(implicit p: Parameters) extends ParameterizedBundle()(p) with HasMemData with HasMemTag -class MemIO extends Bundle { +class MemIO(implicit p: Parameters) extends ParameterizedBundle()(p) { val req_cmd = Decoupled(new MemReqCmd) val req_data = Decoupled(new MemData) val resp = Decoupled(new MemResp).flip } -class MemPipeIO extends Bundle { +class MemPipeIO(implicit p: Parameters) extends ParameterizedBundle()(p) { val req_cmd = Decoupled(new MemReqCmd) val req_data = Decoupled(new MemData) val resp = Valid(new MemResp).flip } -class MemSerializedIO(w: Int) extends Bundle { +class MemSerializedIO(w: Int)(implicit p: Parameters) extends ParameterizedBundle()(p) { val req = Decoupled(Bits(width = w)) val resp = Valid(Bits(width = w)).flip + //override def cloneType = new MemSerializedIO(w)(p).asInstanceOf[this.type] } -class MemSerdes(w: Int) extends MIFModule +class MemSerdes(w: Int)(implicit val p: Parameters) extends MIFModule { val io = new Bundle { val wide = new MemIO().flip @@ -140,12 +143,12 @@ class MemSerdes(w: Int) extends MIFModule io.wide.resp.bits := io.wide.resp.bits.fromBits(in_buf) } -class MemDesserIO(w: Int) extends Bundle { +class MemDesserIO(w: Int)(implicit p: Parameters) extends ParameterizedBundle()(p) { val narrow = new MemSerializedIO(w).flip val wide = new MemIO } -class MemDesser(w: Int) extends Module // test rig side +class MemDesser(w: Int)(implicit p: Parameters) extends Module // test rig side { val io = new MemDesserIO(w) val abits = io.wide.req_cmd.bits.toBits.getWidth @@ -211,59 +214,7 @@ class 
MemDesser(w: Int) extends Module // test rig side io.narrow.resp.bits := dataq.io.deq.bits.toBits >> (recv_cnt * UInt(w)) } -class HellaFlowQueue[T <: Data](val entries: Int)(data: => T) extends Module -{ - val io = new QueueIO(data, entries) - require(entries > 1) - - val do_flow = Wire(Bool()) - val do_enq = io.enq.fire() && !do_flow - val do_deq = io.deq.fire() && !do_flow - - val maybe_full = Reg(init=Bool(false)) - val enq_ptr = Counter(do_enq, entries)._1 - val (deq_ptr, deq_done) = Counter(do_deq, entries) - when (do_enq != do_deq) { maybe_full := do_enq } - - val ptr_match = enq_ptr === deq_ptr - val empty = ptr_match && !maybe_full - val full = ptr_match && maybe_full - val atLeastTwo = full || enq_ptr - deq_ptr >= UInt(2) - do_flow := empty && io.deq.ready - - val ram = SeqMem(data, entries) - when (do_enq) { ram.write(enq_ptr, io.enq.bits) } - - val ren = io.deq.ready && (atLeastTwo || !io.deq.valid && !empty) - val raddr = Mux(io.deq.valid, Mux(deq_done, UInt(0), deq_ptr + UInt(1)), deq_ptr) - val ram_out_valid = Reg(next = ren) - - io.deq.valid := Mux(empty, io.enq.valid, ram_out_valid) - io.enq.ready := !full - io.deq.bits := Mux(empty, io.enq.bits, ram.read(raddr, ren)) -} - -class HellaQueue[T <: Data](val entries: Int)(data: => T) extends Module -{ - val io = new QueueIO(data, entries) - - val fq = Module(new HellaFlowQueue(entries)(data)) - fq.io.enq <> io.enq - io.deq <> Queue(fq.io.deq, 1, pipe = true) -} - -object HellaQueue -{ - def apply[T <: Data](enq: DecoupledIO[T], entries: Int) = { - val q = Module((new HellaQueue(entries)) { enq.bits }) - q.io.enq.valid := enq.valid // not using <> so that override is allowed - q.io.enq.bits := enq.bits - enq.ready := q.io.enq.ready - q.io.deq - } -} - -class MemIOArbiter(val arbN: Int) extends MIFModule { +class MemIOArbiter(val arbN: Int)(implicit val p: Parameters) extends MIFModule { val io = new Bundle { val inner = Vec(new MemIO, arbN).flip val outer = new MemIO @@ -307,7 +258,7 @@ class 
MemIOArbiter(val arbN: Int) extends MIFModule { } object MemIOMemPipeIOConverter { - def apply(in: MemPipeIO): MemIO = { + def apply(in: MemPipeIO)(implicit p: Parameters): MemIO = { val out = Wire(new MemIO()) in.resp.valid := out.resp.valid in.resp.bits := out.resp.bits @@ -322,7 +273,7 @@ object MemIOMemPipeIOConverter { } } -class MemPipeIOMemIOConverter(numRequests: Int) extends MIFModule { +class MemPipeIOMemIOConverter(numRequests: Int)(implicit val p: Parameters) extends MIFModule { val io = new Bundle { val cpu = new MemIO().flip val mem = new MemPipeIO diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 413f3f5c..f26cf526 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -48,9 +48,8 @@ trait HasNastiParameters { } abstract class NastiModule extends Module with HasNastiParameters -abstract class NastiBundle(implicit val p: Parameters) extends Bundle with HasNastiParameters { - override def cloneType = this.getClass.getConstructors.head.newInstance(p).asInstanceOf[this.type] -} +abstract class NastiBundle(implicit p: Parameters) extends ParameterizedBundle()(p) + with HasNastiParameters abstract class NastiChannel(implicit p: Parameters) extends NastiBundle()(p) abstract class NastiMasterToSlaveChannel(implicit p: Parameters) extends NastiChannel()(p) @@ -73,7 +72,7 @@ trait HasNastiData extends HasNastiParameters { val last = Bool() } -class NastiIO(implicit p: Parameters) extends NastiBundle()(p) { +class NastiIO(implicit p: Parameters) extends ParameterizedBundle()(p) { val aw = Decoupled(new NastiWriteAddressChannel) val w = Decoupled(new NastiWriteDataChannel) val b = Decoupled(new NastiWriteResponseChannel).flip diff --git a/junctions/src/main/scala/util.scala b/junctions/src/main/scala/util.scala index 1aebcad9..cefe8470 100644 --- a/junctions/src/main/scala/util.scala +++ b/junctions/src/main/scala/util.scala @@ -5,3 +5,56 @@ import Chisel._ object bigIntPow2 { 
def apply(in: BigInt): Boolean = in > 0 && ((in & (in-1)) == 0) } + +class ParameterizedBundle(implicit val p: Parameters) extends Bundle { + override def cloneType = this.getClass.getConstructors.head.newInstance(p).asInstanceOf[this.type] +} + +class HellaFlowQueue[T <: Data](val entries: Int)(data: => T) extends Module { + val io = new QueueIO(data, entries) + require(entries > 1) + + val do_flow = Wire(Bool()) + val do_enq = io.enq.fire() && !do_flow + val do_deq = io.deq.fire() && !do_flow + + val maybe_full = Reg(init=Bool(false)) + val enq_ptr = Counter(do_enq, entries)._1 + val (deq_ptr, deq_done) = Counter(do_deq, entries) + when (do_enq != do_deq) { maybe_full := do_enq } + + val ptr_match = enq_ptr === deq_ptr + val empty = ptr_match && !maybe_full + val full = ptr_match && maybe_full + val atLeastTwo = full || enq_ptr - deq_ptr >= UInt(2) + do_flow := empty && io.deq.ready + + val ram = SeqMem(data, entries) + when (do_enq) { ram.write(enq_ptr, io.enq.bits) } + + val ren = io.deq.ready && (atLeastTwo || !io.deq.valid && !empty) + val raddr = Mux(io.deq.valid, Mux(deq_done, UInt(0), deq_ptr + UInt(1)), deq_ptr) + val ram_out_valid = Reg(next = ren) + + io.deq.valid := Mux(empty, io.enq.valid, ram_out_valid) + io.enq.ready := !full + io.deq.bits := Mux(empty, io.enq.bits, ram.read(raddr, ren)) +} + +class HellaQueue[T <: Data](val entries: Int)(data: => T) extends Module { + val io = new QueueIO(data, entries) + + val fq = Module(new HellaFlowQueue(entries)(data)) + fq.io.enq <> io.enq + io.deq <> Queue(fq.io.deq, 1, pipe = true) +} + +object HellaQueue { + def apply[T <: Data](enq: DecoupledIO[T], entries: Int) = { + val q = Module((new HellaQueue(entries)) { enq.bits }) + q.io.enq.valid := enq.valid // not using <> so that override is allowed + q.io.enq.bits := enq.bits + enq.ready := q.io.enq.ready + q.io.deq + } +} From adcd77db36ac25d7bc986dfcefc6274ac2ecb051 Mon Sep 17 00:00:00 2001 From: Henry Cook Date: Mon, 5 Oct 2015 20:33:55 -0700 Subject: 
[PATCH 029/116] Removed all traces of params --- junctions/src/main/scala/addrmap.scala | 107 ++++++++++++++++++ junctions/src/main/scala/hasti.scala | 74 ++++++------ junctions/src/main/scala/memserdes.scala | 18 +-- junctions/src/main/scala/nasti.scala | 136 +++-------------------- junctions/src/main/scala/package.scala | 2 +- junctions/src/main/scala/poci.scala | 17 ++- junctions/src/main/scala/smi.scala | 6 +- junctions/src/main/scala/util.scala | 2 +- 8 files changed, 182 insertions(+), 180 deletions(-) create mode 100644 junctions/src/main/scala/addrmap.scala diff --git a/junctions/src/main/scala/addrmap.scala b/junctions/src/main/scala/addrmap.scala new file mode 100644 index 00000000..f37111c9 --- /dev/null +++ b/junctions/src/main/scala/addrmap.scala @@ -0,0 +1,107 @@ +// See LICENSE for license details. + +package junctions + +import Chisel._ +import scala.collection.mutable.HashMap + +abstract class MemRegion { def size: BigInt } + +case class MemSize(size: BigInt, prot: Int) extends MemRegion + +case class MemSubmap(size: BigInt, entries: AddrMap) extends MemRegion + +object AddrMapConsts { + val R = 0x4 + val W = 0x2 + val X = 0x1 + val RW = R | W + val RX = R | X + val RWX = R | W | X +} + +class AddrMapProt extends Bundle { + val r = Bool() + val w = Bool() + val x = Bool() +} + +case class AddrMapEntry(name: String, start: Option[BigInt], region: MemRegion) + +case class AddrHashMapEntry(port: Int, start: BigInt, size: BigInt, prot: Int) + +class AddrMap(entries: Seq[AddrMapEntry]) extends scala.collection.IndexedSeq[AddrMapEntry] { + + def apply(index: Int): AddrMapEntry = entries(index) + + def length: Int = entries.size + + def countSlaves: Int = { + this map { entry: AddrMapEntry => entry.region match { + case MemSize(_, _) => 1 + case MemSubmap(_, submap) => submap.countSlaves + }} reduceLeft(_ + _) + } +} + +object AddrMap { + def apply(elems: AddrMapEntry*): AddrMap = new AddrMap(elems) +} + +class AddrHashMap(addrmap: AddrMap) { + val 
mapping = new HashMap[String, AddrHashMapEntry] + + private def genPairs(am: AddrMap): Seq[(String, AddrHashMapEntry)] = { + var ind = 0 + var base = BigInt(0) + var pairs = Seq[(String, AddrHashMapEntry)]() + am.foreach { case AddrMapEntry(name, startOpt, region) => + region match { + case MemSize(size, prot) => { + if (!startOpt.isEmpty) base = startOpt.get + pairs = (name, AddrHashMapEntry(ind, base, size, prot)) +: pairs + base += size + ind += 1 + } + case MemSubmap(size, submap) => { + if (!startOpt.isEmpty) base = startOpt.get + val subpairs = genPairs(submap).map { + case (subname, AddrHashMapEntry(subind, subbase, subsize, prot)) => + (name + ":" + subname, + AddrHashMapEntry(ind + subind, base + subbase, subsize, prot)) + } + pairs = subpairs ++ pairs + ind += subpairs.size + base += size + } + } + } + pairs + } + + for ((name, ind) <- genPairs(addrmap)) { mapping(name) = ind } + + def nEntries: Int = mapping.size + def apply(name: String): AddrHashMapEntry = mapping(name) + def get(name: String): Option[AddrHashMapEntry] = mapping.get(name) + def sortedEntries(): Seq[(String, BigInt, BigInt, Int)] = { + val arr = new Array[(String, BigInt, BigInt, Int)](mapping.size) + mapping.foreach { case (name, AddrHashMapEntry(port, base, size, prot)) => + arr(port) = (name, base, size, prot) + } + arr.toSeq + } + + def isValid(addr: UInt): Bool = { + sortedEntries().map { case (_, base, size, _) => + addr >= UInt(base) && addr < UInt(base + size) + }.reduceLeft(_ || _) + } + + def getProt(addr: UInt): AddrMapProt = { + Mux1H(sortedEntries().map { case (_, base, size, prot) => + (addr >= UInt(base) && addr < UInt(base + size), + new AddrMapProt().fromBits(Bits(prot, 3))) + }) + } +} diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 2e3b1ff7..7a913dfe 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -2,7 +2,7 @@ package junctions import Chisel._ -abstract trait HASTIConstants 
+trait HastiConstants { val SZ_HTRANS = 2 val HTRANS_IDLE = UInt(0, SZ_HTRANS) @@ -27,16 +27,22 @@ abstract trait HASTIConstants val SZ_HSIZE = 3 val SZ_HPROT = 4 - // TODO: Parameterize - val SZ_HADDR = 32 - val SZ_HDATA = 32 - def dgate(valid: Bool, b: UInt) = Fill(b.getWidth, valid) & b } -class HASTIMasterIO extends Bundle -{ - val haddr = UInt(OUTPUT, SZ_HADDR) +trait HasHastiParameters { + implicit val p: Parameters + val hastiAddrBits = 32 + val hastiDataBits = 32 +} + +abstract class HastiModule(implicit val p: Parameters) extends Module + with HasHastiParameters +abstract class HastiBundle(implicit val p: Parameters) extends ParameterizedBundle()(p) + with HasHastiParameters + +class HastiMasterIO(implicit p: Parameters) extends HastiBundle()(p) { + val haddr = UInt(OUTPUT, hastiAddrBits) val hwrite = Bool(OUTPUT) val hsize = UInt(OUTPUT, SZ_HSIZE) val hburst = UInt(OUTPUT, SZ_HBURST) @@ -44,16 +50,15 @@ class HASTIMasterIO extends Bundle val htrans = UInt(OUTPUT, SZ_HTRANS) val hmastlock = Bool(OUTPUT) - val hwdata = Bits(OUTPUT, SZ_HDATA) - val hrdata = Bits(INPUT, SZ_HDATA) + val hwdata = Bits(OUTPUT, hastiDataBits) + val hrdata = Bits(INPUT, hastiDataBits) val hready = Bool(INPUT) val hresp = UInt(INPUT, SZ_HRESP) } -class HASTISlaveIO extends Bundle -{ - val haddr = UInt(INPUT, SZ_HADDR) +class HastiSlaveIO(implicit p: Parameters) extends HastiBundle()(p) { + val haddr = UInt(INPUT, hastiAddrBits) val hwrite = Bool(INPUT) val hsize = UInt(INPUT, SZ_HSIZE) val hburst = UInt(INPUT, SZ_HBURST) @@ -61,8 +66,8 @@ class HASTISlaveIO extends Bundle val htrans = UInt(INPUT, SZ_HTRANS) val hmastlock = Bool(INPUT) - val hwdata = Bits(INPUT, SZ_HDATA) - val hrdata = Bits(OUTPUT, SZ_HDATA) + val hwdata = Bits(INPUT, hastiDataBits) + val hrdata = Bits(OUTPUT, hastiDataBits) val hsel = Bool(INPUT) val hreadyin = Bool(INPUT) @@ -70,23 +75,22 @@ class HASTISlaveIO extends Bundle val hresp = UInt(OUTPUT, SZ_HRESP) } -class HASTIBus(amap: Seq[UInt=>Bool]) extends 
Module -{ +class HastiBus(amap: Seq[UInt=>Bool])(implicit p: Parameters) extends HastiModule()(p) { val io = new Bundle { - val master = new HASTIMasterIO().flip - val slaves = Vec(new HASTISlaveIO, amap.size).flip + val master = new HastiMasterIO().flip + val slaves = Vec(new HastiSlaveIO, amap.size).flip } // skid buffer val skb_valid = Reg(init = Bool(false)) - val skb_haddr = Reg(UInt(width = SZ_HADDR)) + val skb_haddr = Reg(UInt(width = hastiAddrBits)) val skb_hwrite = Reg(Bool()) val skb_hsize = Reg(UInt(width = SZ_HSIZE)) val skb_hburst = Reg(UInt(width = SZ_HBURST)) val skb_hprot = Reg(UInt(width = SZ_HPROT)) val skb_htrans = Reg(UInt(width = SZ_HTRANS)) val skb_hmastlock = Reg(Bool()) - val skb_hwdata = Reg(UInt(width = SZ_HDATA)) + val skb_hwdata = Reg(UInt(width = hastiDataBits)) val master_haddr = Mux(skb_valid, skb_haddr, io.master.haddr) val master_hwrite = Mux(skb_valid, skb_hwrite, io.master.hwrite) @@ -142,16 +146,15 @@ class HASTIBus(amap: Seq[UInt=>Bool]) extends Module io.master.hresp := Mux1H(s1_hsels, io.slaves.map(_.hresp)) } -class HASTISlaveMux(n: Int) extends Module -{ +class HastiSlaveMux(n: Int)(implicit p: Parameters) extends HastiModule()(p) { val io = new Bundle { - val ins = Vec(new HASTISlaveIO, n) - val out = new HASTISlaveIO().flip + val ins = Vec(new HastiSlaveIO, n) + val out = new HastiSlaveIO().flip } // skid buffers val skb_valid = Array.fill(n){Reg(init = Bool(false))} - val skb_haddr = Array.fill(n){Reg(UInt(width = SZ_HADDR))} + val skb_haddr = Array.fill(n){Reg(UInt(width = hastiAddrBits))} val skb_hwrite = Array.fill(n){Reg(Bool())} val skb_hsize = Array.fill(n){Reg(UInt(width = SZ_HSIZE))} val skb_hburst = Array.fill(n){Reg(UInt(width = SZ_HBURST))} @@ -212,15 +215,15 @@ class HASTISlaveMux(n: Int) extends Module } } } -class HASTIXbar(nMasters: Int, addressMap: Seq[UInt=>Bool]) extends Module -{ +class HastiXbar(nMasters: Int, addressMap: Seq[UInt=>Bool]) + (implicit p: Parameters) extends HastiModule()(p) { val io = 
new Bundle { - val masters = Vec(new HASTIMasterIO, nMasters).flip - val slaves = Vec(new HASTISlaveIO, addressMap.size).flip + val masters = Vec(new HastiMasterIO, nMasters).flip + val slaves = Vec(new HastiSlaveIO, addressMap.size).flip } - val buses = List.fill(nMasters){Module(new HASTIBus(addressMap))} - val muxes = List.fill(addressMap.size){Module(new HASTISlaveMux(nMasters))} + val buses = List.fill(nMasters){Module(new HastiBus(addressMap))} + val muxes = List.fill(addressMap.size){Module(new HastiSlaveMux(nMasters))} (buses.map(b => b.io.master) zip io.masters) foreach { case (b, m) => b <> m } (muxes.map(m => m.io.out) zip io.slaves ) foreach { case (x, s) => x <> s } @@ -229,11 +232,10 @@ class HASTIXbar(nMasters: Int, addressMap: Seq[UInt=>Bool]) extends Module } } -class HASTISlaveToMaster extends Module -{ +class HastiSlaveToMaster(implicit p: Parameters) extends HastiModule()(p) { val io = new Bundle { - val in = new HASTISlaveIO - val out = new HASTIMasterIO + val in = new HastiSlaveIO + val out = new HastiMasterIO } io.out.haddr := io.in.haddr diff --git a/junctions/src/main/scala/memserdes.scala b/junctions/src/main/scala/memserdes.scala index 1e15a78c..a439c8a9 100644 --- a/junctions/src/main/scala/memserdes.scala +++ b/junctions/src/main/scala/memserdes.scala @@ -26,8 +26,8 @@ trait HasMIFParameters { val mifDataBeats = p(MIFDataBeats) } -abstract class MIFModule extends Module with HasMIFParameters -abstract class MIFBundle(implicit p: Parameters) extends ParameterizedBundle()(p) +abstract class MIFModule(implicit val p: Parameters) extends Module with HasMIFParameters +abstract class MIFBundle(implicit val p: Parameters) extends ParameterizedBundle()(p) with HasMIFParameters trait HasMemData extends HasMIFParameters { @@ -46,9 +46,9 @@ class MemReqCmd(implicit p: Parameters) extends MIFBundle()(p) with HasMemAddr w val rw = Bool() } -class MemTag(implicit p: Parameters) extends ParameterizedBundle()(p) with HasMemTag -class MemData(implicit 
p: Parameters) extends ParameterizedBundle()(p) with HasMemData -class MemResp(implicit p: Parameters) extends ParameterizedBundle()(p) with HasMemData with HasMemTag +class MemTag(implicit p: Parameters) extends MIFBundle()(p) with HasMemTag +class MemData(implicit p: Parameters) extends MIFBundle()(p) with HasMemData +class MemResp(implicit p: Parameters) extends MIFBundle()(p) with HasMemData with HasMemTag class MemIO(implicit p: Parameters) extends ParameterizedBundle()(p) { val req_cmd = Decoupled(new MemReqCmd) @@ -68,7 +68,7 @@ class MemSerializedIO(w: Int)(implicit p: Parameters) extends ParameterizedBundl //override def cloneType = new MemSerializedIO(w)(p).asInstanceOf[this.type] } -class MemSerdes(w: Int)(implicit val p: Parameters) extends MIFModule +class MemSerdes(w: Int)(implicit p: Parameters) extends MIFModule { val io = new Bundle { val wide = new MemIO().flip @@ -154,7 +154,7 @@ class MemDesser(w: Int)(implicit p: Parameters) extends Module // test rig side val abits = io.wide.req_cmd.bits.toBits.getWidth val dbits = io.wide.req_data.bits.toBits.getWidth val rbits = io.wide.resp.bits.getWidth - val mifDataBeats = params(MIFDataBeats) + val mifDataBeats = p(MIFDataBeats) require(dbits >= abits && rbits >= dbits) val recv_cnt = Reg(init=UInt(0, log2Up((rbits+w-1)/w))) @@ -214,7 +214,7 @@ class MemDesser(w: Int)(implicit p: Parameters) extends Module // test rig side io.narrow.resp.bits := dataq.io.deq.bits.toBits >> (recv_cnt * UInt(w)) } -class MemIOArbiter(val arbN: Int)(implicit val p: Parameters) extends MIFModule { +class MemIOArbiter(val arbN: Int)(implicit p: Parameters) extends MIFModule { val io = new Bundle { val inner = Vec(new MemIO, arbN).flip val outer = new MemIO @@ -273,7 +273,7 @@ object MemIOMemPipeIOConverter { } } -class MemPipeIOMemIOConverter(numRequests: Int)(implicit val p: Parameters) extends MIFModule { +class MemPipeIOMemIOConverter(numRequests: Int)(implicit p: Parameters) extends MIFModule { val io = new Bundle { val 
cpu = new MemIO().flip val mem = new MemPipeIO diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index f26cf526..32e25228 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -4,9 +4,8 @@ package junctions import Chisel._ import scala.math.max import scala.collection.mutable.ArraySeq -import scala.collection.mutable.HashMap -case object NastiBitWidths extends Field[NastiParameters] +case object NastiKey extends Field[NastiParameters] case object NastiAddrMap extends Field[AddrMap] case object MMIOBase extends Field[BigInt] @@ -14,7 +13,7 @@ case class NastiParameters(dataBits: Int, addrBits: Int, idBits: Int) trait HasNastiParameters { implicit val p: Parameters - val external = p(NastiBitWidths) + val external = p(NastiKey) val nastiXDataBits = external.dataBits val nastiWStrobeBits = nastiXDataBits / 8 val nastiXAddrBits = external.addrBits @@ -47,8 +46,9 @@ trait HasNastiParameters { UInt(128) -> UInt(7))) } -abstract class NastiModule extends Module with HasNastiParameters -abstract class NastiBundle(implicit p: Parameters) extends ParameterizedBundle()(p) +abstract class NastiModule(implicit val p: Parameters) extends Module + with HasNastiParameters +abstract class NastiBundle(implicit val p: Parameters) extends ParameterizedBundle()(p) with HasNastiParameters abstract class NastiChannel(implicit p: Parameters) extends NastiBundle()(p) @@ -72,7 +72,7 @@ trait HasNastiData extends HasNastiParameters { val last = Bool() } -class NastiIO(implicit p: Parameters) extends ParameterizedBundle()(p) { +class NastiIO(implicit val p: Parameters) extends ParameterizedBundle()(p) { val aw = Decoupled(new NastiWriteAddressChannel) val w = Decoupled(new NastiWriteDataChannel) val b = Decoupled(new NastiWriteResponseChannel).flip @@ -190,7 +190,7 @@ object NastiWriteResponseChannel { } } -class MemIONastiIOConverter(cacheBlockOffsetBits: Int)(implicit val p: Parameters) extends MIFModule +class 
MemIONastiIOConverter(cacheBlockOffsetBits: Int)(implicit p: Parameters) extends MIFModule with HasNastiParameters { val io = new Bundle { val nasti = (new NastiIO).flip @@ -245,7 +245,7 @@ class MemIONastiIOConverter(cacheBlockOffsetBits: Int)(implicit val p: Parameter } /** Arbitrate among arbN masters requesting to a single slave */ -class NastiArbiter(val arbN: Int)(implicit val p: Parameters) extends NastiModule { +class NastiArbiter(val arbN: Int)(implicit p: Parameters) extends NastiModule { val io = new Bundle { val master = Vec(new NastiIO, arbN).flip val slave = new NastiIO @@ -315,7 +315,7 @@ class NastiArbiter(val arbN: Int)(implicit val p: Parameters) extends NastiModul /** Locking RR arbiter for Nasti read data channel * Arbiter locks until last message in channel is sent */ -class NastiReadDataArbiter(arbN: Int)(implicit val p: Parameters) extends NastiModule { +class NastiReadDataArbiter(arbN: Int)(implicit p: Parameters) extends NastiModule { val io = new Bundle { val in = Vec(Decoupled(new NastiReadDataChannel), arbN).flip val out = Decoupled(new NastiReadDataChannel) @@ -356,7 +356,7 @@ class NastiReadDataArbiter(arbN: Int)(implicit val p: Parameters) extends NastiM } /** A slave that send decode error for every request it receives */ -class NastiErrorSlave(implicit val p: Parameters) extends NastiModule { +class NastiErrorSlave(implicit p: Parameters) extends NastiModule { val io = (new NastiIO).flip when (io.ar.fire()) { printf("Invalid read address %x\n", io.ar.bits.addr) } @@ -408,7 +408,7 @@ class NastiErrorSlave(implicit val p: Parameters) extends NastiModule { /** Take a single Nasti master and route its requests to various slaves * @param addrmap a sequence of base address + memory size pairs, * on for each slave interface */ -class NastiRouter(addrmap: Seq[(BigInt, BigInt)])(implicit val p: Parameters) extends NastiModule { +class NastiRouter(addrmap: Seq[(BigInt, BigInt)])(implicit p: Parameters) extends NastiModule { val nSlaves = 
addrmap.size val io = new Bundle { @@ -488,7 +488,7 @@ class NastiRouter(addrmap: Seq[(BigInt, BigInt)])(implicit val p: Parameters) ex * @param addrmap a sequence of base - size pairs; * size of addrmap should be nSlaves */ class NastiCrossbar(nMasters: Int, nSlaves: Int, addrmap: Seq[(BigInt, BigInt)]) - (implicit val p: Parameters) extends NastiModule { + (implicit p: Parameters) extends NastiModule { val io = new Bundle { val masters = Vec(new NastiIO, nMasters).flip val slaves = Vec(new NastiIO, nSlaves) @@ -507,112 +507,6 @@ class NastiCrossbar(nMasters: Int, nSlaves: Int, addrmap: Seq[(BigInt, BigInt)]) } } -object AddrMapConsts { - val R = 0x4 - val W = 0x2 - val X = 0x1 - val RW = R | W - val RX = R | X - val RWX = R | W | X -} - -class AddrMapProt extends Bundle { - val r = Bool() - val w = Bool() - val x = Bool() -} - -abstract class MemRegion { def size: BigInt } - -case class MemSize(size: BigInt, prot: Int) extends MemRegion - -case class MemSubmap(size: BigInt, entries: AddrMap) extends MemRegion - -//object Submap { -// def apply(size: BigInt, entries: AddrMapEntry*) = -// new MemSubmap(size, entries) -//} - -case class AddrMapEntry(name: String, start: Option[BigInt], region: MemRegion) - -case class AddrHashMapEntry(port: Int, start: BigInt, size: BigInt, prot: Int) - -class AddrMap(entries: Seq[AddrMapEntry]) extends scala.collection.IndexedSeq[AddrMapEntry] { - - def apply(index: Int): AddrMapEntry = entries(index) - - def length: Int = entries.size - - def countSlaves: Int = { - this map { entry: AddrMapEntry => entry.region match { - case MemSize(_, _) => 1 - case MemSubmap(_, submap) => submap.countSlaves - }} reduceLeft(_ + _) - } -} - -object AddrMap { - def apply(elems: AddrMapEntry*): AddrMap = new AddrMap(elems) -} - -class AddrHashMap(addrmap: AddrMap) { - val mapping = new HashMap[String, AddrHashMapEntry] - - private def genPairs(am: AddrMap): Seq[(String, AddrHashMapEntry)] = { - var ind = 0 - var base = BigInt(0) - var pairs = 
Seq[(String, AddrHashMapEntry)]() - am.foreach { case AddrMapEntry(name, startOpt, region) => - region match { - case MemSize(size, prot) => { - if (!startOpt.isEmpty) base = startOpt.get - pairs = (name, AddrHashMapEntry(ind, base, size, prot)) +: pairs - base += size - ind += 1 - } - case MemSubmap(size, submap) => { - if (!startOpt.isEmpty) base = startOpt.get - val subpairs = genPairs(submap).map { - case (subname, AddrHashMapEntry(subind, subbase, subsize, prot)) => - (name + ":" + subname, - AddrHashMapEntry(ind + subind, base + subbase, subsize, prot)) - } - pairs = subpairs ++ pairs - ind += subpairs.size - base += size - } - } - } - pairs - } - - for ((name, ind) <- genPairs(addrmap)) { mapping(name) = ind } - - def nEntries: Int = mapping.size - def apply(name: String): AddrHashMapEntry = mapping(name) - def get(name: String): Option[AddrHashMapEntry] = mapping.get(name) - def sortedEntries(): Seq[(String, BigInt, BigInt, Int)] = { - val arr = new Array[(String, BigInt, BigInt, Int)](mapping.size) - mapping.foreach { case (name, AddrHashMapEntry(port, base, size, prot)) => - arr(port) = (name, base, size, prot) - } - arr.toSeq - } - - def isValid(addr: UInt): Bool = { - sortedEntries().map { case (_, base, size, _) => - addr >= UInt(base) && addr < UInt(base + size) - }.reduceLeft(_ || _) - } - - def getProt(addr: UInt): AddrMapProt = { - Mux1H(sortedEntries().map { case (_, base, size, prot) => - (addr >= UInt(base) && addr < UInt(base + size), - new AddrMapProt().fromBits(Bits(prot, 3))) - }) - } -} - class NastiInterconnectIO(val nMasters: Int, val nSlaves: Int) (implicit p: Parameters) extends Bundle { /* This is a bit confusing. 
The interconnect is a slave to the masters and @@ -623,7 +517,7 @@ class NastiInterconnectIO(val nMasters: Int, val nSlaves: Int) new NastiInterconnectIO(nMasters, nSlaves).asInstanceOf[this.type] } -abstract class NastiInterconnect extends NastiModule { +abstract class NastiInterconnect(implicit p: Parameters) extends NastiModule()(p) { val nMasters: Int val nSlaves: Int @@ -635,7 +529,7 @@ class NastiRecursiveInterconnect( val nSlaves: Int, addrmap: AddrMap, base: BigInt = 0) - (implicit val p: Parameters) extends NastiInterconnect { + (implicit p: Parameters) extends NastiInterconnect { var lastEnd = base var slaveInd = 0 val levelSize = addrmap.size @@ -678,7 +572,7 @@ class NastiRecursiveInterconnect( } class NastiTopInterconnect(val nMasters: Int, val nSlaves: Int) - (implicit val p: Parameters) extends NastiInterconnect { + (implicit p: Parameters) extends NastiInterconnect { val temp = Module(new NastiRecursiveInterconnect(nMasters, nSlaves, p(NastiAddrMap))) temp.io.masters.zip(io.masters).foreach { case (t, i) => diff --git a/junctions/src/main/scala/package.scala b/junctions/src/main/scala/package.scala index deb7549d..3181064e 100644 --- a/junctions/src/main/scala/package.scala +++ b/junctions/src/main/scala/package.scala @@ -1 +1 @@ -package object junctions extends HASTIConstants with POCIConstants +package object junctions extends HastiConstants with PociConstants diff --git a/junctions/src/main/scala/poci.scala b/junctions/src/main/scala/poci.scala index bfd581c7..bac75966 100644 --- a/junctions/src/main/scala/poci.scala +++ b/junctions/src/main/scala/poci.scala @@ -2,13 +2,13 @@ package junctions import Chisel._ -abstract trait POCIConstants +abstract trait PociConstants { val SZ_PADDR = 32 val SZ_PDATA = 32 } -class POCIIO extends Bundle +class PociIO extends Bundle { val paddr = UInt(OUTPUT, SZ_PADDR) val pwrite = Bool(OUTPUT) @@ -20,11 +20,10 @@ class POCIIO extends Bundle val pslverr = Bool(INPUT) } -class HASTItoPOCIBridge extends Module -{ 
+class HastiToPociBridge(implicit p: Parameters) extends HastiModule()(p) { val io = new Bundle { - val in = new HASTISlaveIO - val out = new POCIIO + val in = new HastiSlaveIO + val out = new PociIO } val s_idle :: s_setup :: s_access :: Nil = Enum(UInt(), 3) @@ -62,11 +61,11 @@ class HASTItoPOCIBridge extends Module io.in.hresp := io.out.pslverr } -class POCIBus(amap: Seq[UInt=>Bool]) extends Module +class PociBus(amap: Seq[UInt=>Bool]) extends Module { val io = new Bundle { - val master = new POCIIO().flip - val slaves = Vec(new POCIIO, amap.size) + val master = new PociIO().flip + val slaves = Vec(new PociIO, amap.size) } val psels = PriorityEncoderOH( diff --git a/junctions/src/main/scala/smi.scala b/junctions/src/main/scala/smi.scala index 52d98779..7b9c5537 100644 --- a/junctions/src/main/scala/smi.scala +++ b/junctions/src/main/scala/smi.scala @@ -88,7 +88,7 @@ class SMIArbiter(val n: Int, val dataWidth: Int, val addrWidth: Int) } class SMIIONastiReadIOConverter(val dataWidth: Int, val addrWidth: Int) - (implicit val p: Parameters) extends NastiModule { + (implicit p: Parameters) extends NastiModule()(p) { val io = new Bundle { val ar = Decoupled(new NastiReadAddressChannel).flip val r = Decoupled(new NastiReadDataChannel) @@ -170,7 +170,7 @@ class SMIIONastiReadIOConverter(val dataWidth: Int, val addrWidth: Int) } class SMIIONastiWriteIOConverter(val dataWidth: Int, val addrWidth: Int) - (implicit val p: Parameters) extends NastiModule { + (implicit p: Parameters) extends NastiModule()(p) { val io = new Bundle { val aw = Decoupled(new NastiWriteAddressChannel).flip val w = Decoupled(new NastiWriteDataChannel).flip @@ -251,7 +251,7 @@ class SMIIONastiWriteIOConverter(val dataWidth: Int, val addrWidth: Int) /** Convert Nasti protocol to SMI protocol */ class SMIIONastiIOConverter(val dataWidth: Int, val addrWidth: Int) - (implicit val p: Parameters) extends NastiModule { + (implicit p: Parameters) extends NastiModule()(p) { val io = new Bundle { val nasti = 
(new NastiIO).flip val smi = new SMIIO(dataWidth, addrWidth) diff --git a/junctions/src/main/scala/util.scala b/junctions/src/main/scala/util.scala index cefe8470..62b1e189 100644 --- a/junctions/src/main/scala/util.scala +++ b/junctions/src/main/scala/util.scala @@ -6,7 +6,7 @@ object bigIntPow2 { def apply(in: BigInt): Boolean = in > 0 && ((in & (in-1)) == 0) } -class ParameterizedBundle(implicit val p: Parameters) extends Bundle { +class ParameterizedBundle(implicit p: Parameters) extends Bundle { override def cloneType = this.getClass.getConstructors.head.newInstance(p).asInstanceOf[this.type] } From 166df221ad2d910e25c0f9173ffc3ba261b6c81a Mon Sep 17 00:00:00 2001 From: Henry Cook Date: Tue, 6 Oct 2015 18:14:51 -0700 Subject: [PATCH 030/116] added HasAddrMapParameters --- junctions/src/main/scala/addrmap.scala | 27 ++++++++++++++++++++++++ junctions/src/main/scala/memserdes.scala | 9 -------- junctions/src/main/scala/nasti.scala | 6 ++---- 3 files changed, 29 insertions(+), 13 deletions(-) diff --git a/junctions/src/main/scala/addrmap.scala b/junctions/src/main/scala/addrmap.scala index f37111c9..8e6dea4e 100644 --- a/junctions/src/main/scala/addrmap.scala +++ b/junctions/src/main/scala/addrmap.scala @@ -5,6 +5,33 @@ package junctions import Chisel._ import scala.collection.mutable.HashMap +case object PAddrBits extends Field[Int] +case object VAddrBits extends Field[Int] +case object PgIdxBits extends Field[Int] +case object PgLevels extends Field[Int] +case object PgLevelBits extends Field[Int] +case object ASIdBits extends Field[Int] +case object PPNBits extends Field[Int] +case object VPNBits extends Field[Int] + +case object GlobalAddrMap extends Field[AddrMap] +case object MMIOBase extends Field[BigInt] + +trait HasAddrMapParameters { + implicit val p: Parameters + + val paddrBits = p(PAddrBits) + val vaddrBits = p(VAddrBits) + val pgIdxBits = p(PgIdxBits) + val ppnBits = p(PPNBits) + val vpnBits = p(VPNBits) + val pgLevels = p(PgLevels) + val 
pgLevelBits = p(PgLevelBits) + val asIdBits = p(ASIdBits) + + val addrMap = new AddrHashMap(p(GlobalAddrMap)) +} + abstract class MemRegion { def size: BigInt } case class MemSize(size: BigInt, prot: Int) extends MemRegion diff --git a/junctions/src/main/scala/memserdes.scala b/junctions/src/main/scala/memserdes.scala index a439c8a9..2fc8ace1 100644 --- a/junctions/src/main/scala/memserdes.scala +++ b/junctions/src/main/scala/memserdes.scala @@ -4,15 +4,6 @@ package junctions import Chisel._ import scala.math._ -case object PAddrBits extends Field[Int] -case object VAddrBits extends Field[Int] -case object PgIdxBits extends Field[Int] -case object PgLevels extends Field[Int] -case object PgLevelBits extends Field[Int] -case object ASIdBits extends Field[Int] -case object PPNBits extends Field[Int] -case object VPNBits extends Field[Int] - case object MIFAddrBits extends Field[Int] case object MIFDataBits extends Field[Int] case object MIFTagBits extends Field[Int] diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 32e25228..f57ff9fc 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -6,8 +6,6 @@ import scala.math.max import scala.collection.mutable.ArraySeq case object NastiKey extends Field[NastiParameters] -case object NastiAddrMap extends Field[AddrMap] -case object MMIOBase extends Field[BigInt] case class NastiParameters(dataBits: Int, addrBits: Int, idBits: Int) @@ -571,9 +569,9 @@ class NastiRecursiveInterconnect( } } -class NastiTopInterconnect(val nMasters: Int, val nSlaves: Int) +class NastiTopInterconnect(val nMasters: Int, val nSlaves: Int, addrMap: AddrMap) (implicit p: Parameters) extends NastiInterconnect { - val temp = Module(new NastiRecursiveInterconnect(nMasters, nSlaves, p(NastiAddrMap))) + val temp = Module(new NastiRecursiveInterconnect(nMasters, nSlaves, addrMap)) temp.io.masters.zip(io.masters).foreach { case (t, i) => t.ar <> i.ar From 
2fee3fd0fd634a97bc0bf8a5980c994cba5d9c7c Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Tue, 13 Oct 2015 12:44:48 -0700 Subject: [PATCH 031/116] make sure NASTI -> SMI converter still works if words per beat is 1 --- junctions/src/main/scala/smi.scala | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/junctions/src/main/scala/smi.scala b/junctions/src/main/scala/smi.scala index ec2e554e..d2d65067 100644 --- a/junctions/src/main/scala/smi.scala +++ b/junctions/src/main/scala/smi.scala @@ -215,13 +215,14 @@ class SMIIONASTIWriteIOConverter(val dataWidth: Int, val addrWidth: Int) io.b.valid := (state === s_resp) io.b.bits := NASTIWriteResponseChannel(id) - val jump = PriorityMux(strb(maxWordsPerBeat - 1, 1), - (1 until maxWordsPerBeat).map(UInt(_))) + val jump = if (maxWordsPerBeat > 1) + PriorityMux(strb(maxWordsPerBeat - 1, 1), + (1 until maxWordsPerBeat).map(UInt(_))) + else UInt(1) when (io.aw.fire()) { addr := io.aw.bits.addr(addrOffBits - 1, byteOffBits) id := io.aw.bits.id - //size := io.aw.bits.size - UInt(byteOffBits) size := io.aw.bits.size last := Bool(false) state := s_data From cedef980455e8aeb3d522b63b46ec88cd9bb532a Mon Sep 17 00:00:00 2001 From: Yunsup Lee Date: Mon, 19 Oct 2015 21:43:59 -0700 Subject: [PATCH 032/116] fix NASTI -> MemIO converter bug --- junctions/src/main/scala/nasti.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index f57ff9fc..5c2e7dc5 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -213,7 +213,7 @@ class MemIONastiIOConverter(cacheBlockOffsetBits: Int)(implicit p: Parameters) e when (io.nasti.w.fire() && io.nasti.w.bits.last) { b_ok := Bool(true) } val id_q = Module(new Queue(UInt(width = nastiWIdBits), 2)) - id_q.io.enq.valid := io.nasti.aw.valid + id_q.io.enq.valid := io.nasti.aw.valid && io.mem.req_cmd.ready id_q.io.enq.bits := io.nasti.aw.bits.id 
id_q.io.deq.ready := io.nasti.b.ready && b_ok From 11eacda84af68714f6134bc90c51e1aa35db1279 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Tue, 20 Oct 2015 18:36:19 -0700 Subject: [PATCH 033/116] generalize NastiReadDataArbiter --- junctions/src/main/scala/nasti.scala | 47 ++-------------- junctions/src/main/scala/util.scala | 84 ++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 43 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 5c2e7dc5..7d401655 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -311,48 +311,6 @@ class NastiArbiter(val arbN: Int)(implicit p: Parameters) extends NastiModule { } else { io.slave <> io.master.head } } -/** Locking RR arbiter for Nasti read data channel - * Arbiter locks until last message in channel is sent */ -class NastiReadDataArbiter(arbN: Int)(implicit p: Parameters) extends NastiModule { - val io = new Bundle { - val in = Vec(Decoupled(new NastiReadDataChannel), arbN).flip - val out = Decoupled(new NastiReadDataChannel) - } - - def rotateLeft[T <: Data](norm: Vec[T], rot: UInt): Vec[T] = { - val n = norm.size - Vec.tabulate(n) { i => - Mux(rot < UInt(n - i), norm(UInt(i) + rot), norm(rot - UInt(n - i))) - } - } - - val lockIdx = Reg(init = UInt(0, log2Up(arbN))) - val locked = Reg(init = Bool(false)) - - // use rotation to give priority to the input after the last one granted - val choice = PriorityMux( - rotateLeft(Vec(io.in.map(_.valid)), lockIdx + UInt(1)), - rotateLeft(Vec((0 until arbN).map(UInt(_))), lockIdx + UInt(1))) - - val chosen = Mux(locked, lockIdx, choice) - - for (i <- 0 until arbN) { - io.in(i).ready := io.out.ready && chosen === UInt(i) - } - - io.out.valid := io.in(chosen).valid - io.out.bits := io.in(chosen).bits - - when (io.out.fire()) { - when (!locked) { - lockIdx := choice - locked := !io.out.bits.last - } .elsewhen (io.out.bits.last) { - locked := Bool(false) - } - } -} - /** A slave 
that send decode error for every request it receives */ class NastiErrorSlave(implicit p: Parameters) extends NastiModule { val io = (new NastiIO).flip @@ -466,7 +424,10 @@ class NastiRouter(addrmap: Seq[(BigInt, BigInt)])(implicit p: Parameters) extend io.master.w.ready := w_ready || err_slave.io.w.ready val b_arb = Module(new RRArbiter(new NastiWriteResponseChannel, nSlaves + 1)) - val r_arb = Module(new NastiReadDataArbiter(nSlaves + 1)) + val r_arb = Module(new JunctionsPeekingArbiter( + new NastiReadDataChannel, nSlaves + 1, + // we can unlock if it's the last beat + (r: NastiReadDataChannel) => r.last)) for (i <- 0 until nSlaves) { b_arb.io.in(i) <> io.slave(i).b diff --git a/junctions/src/main/scala/util.scala b/junctions/src/main/scala/util.scala index 62b1e189..9f59ba4f 100644 --- a/junctions/src/main/scala/util.scala +++ b/junctions/src/main/scala/util.scala @@ -58,3 +58,87 @@ object HellaQueue { q.io.deq } } + +/** A generalized locking RR arbiter that addresses the limitations of the + * version in the Chisel standard library */ +abstract class JunctionsAbstractLockingArbiter[T <: Data](typ: T, arbN: Int) + extends Module { + + val io = new Bundle { + val in = Vec(Decoupled(typ.cloneType), arbN).flip + val out = Decoupled(typ.cloneType) + } + + def rotateLeft[T <: Data](norm: Vec[T], rot: UInt): Vec[T] = { + val n = norm.size + Vec.tabulate(n) { i => + Mux(rot < UInt(n - i), norm(UInt(i) + rot), norm(rot - UInt(n - i))) + } + } + + val lockIdx = Reg(init = UInt(0, log2Up(arbN))) + val locked = Reg(init = Bool(false)) + + val choice = PriorityMux( + rotateLeft(Vec(io.in.map(_.valid)), lockIdx + UInt(1)), + rotateLeft(Vec((0 until arbN).map(UInt(_))), lockIdx + UInt(1))) + + val chosen = Mux(locked, lockIdx, choice) + + for (i <- 0 until arbN) { + io.in(i).ready := io.out.ready && chosen === UInt(i) + } + + io.out.valid := io.in(chosen).valid + io.out.bits := io.in(chosen).bits +} + +/** This locking arbiter determines when it is safe to unlock + * by 
peeking at the data */ +class JunctionsPeekingArbiter[T <: Data]( + typ: T, arbN: Int, + canUnlock: T => Bool, + needsLock: Option[T => Bool] = None) + extends JunctionsAbstractLockingArbiter(typ, arbN) { + + def realNeedsLock(data: T): Bool = + needsLock.map(_(data)).getOrElse(Bool(true)) + + when (io.out.fire()) { + when (!locked && realNeedsLock(io.out.bits)) { + lockIdx := choice + locked := Bool(true) + } + // the unlock statement takes precedent + when (canUnlock(io.out.bits)) { + locked := Bool(false) + } + } +} + +/** This arbiter determines when it is safe to unlock by counting transactions */ +class JunctionsCountingArbiter[T <: Data]( + typ: T, arbN: Int, count: Int, + val needsLock: Option[T => Bool] = None) + extends JunctionsAbstractLockingArbiter(typ, arbN) { + + def realNeedsLock(data: T): Bool = + needsLock.map(_(data)).getOrElse(Bool(true)) + + // if count is 1, you should use a non-locking arbiter + require(count > 1, "CountingArbiter cannot have count <= 1") + + val lock_ctr = Counter(count) + + when (io.out.fire()) { + when (!locked && realNeedsLock(io.out.bits)) { + lockIdx := choice + locked := Bool(true) + lock_ctr.inc() + } + + when (locked) { + when (lock_ctr.inc()) { locked := Bool(false) } + } + } +} From 9c3cd8f9fea601a3a916fafc1fc970633bf2c882 Mon Sep 17 00:00:00 2001 From: Henry Cook Date: Wed, 21 Oct 2015 18:15:46 -0700 Subject: [PATCH 034/116] depend on external cde library --- junctions/src/main/scala/addrmap.scala | 1 + junctions/src/main/scala/hasti.scala | 1 + junctions/src/main/scala/memserdes.scala | 1 + junctions/src/main/scala/nasti.scala | 1 + junctions/src/main/scala/poci.scala | 1 + junctions/src/main/scala/smi.scala | 1 + junctions/src/main/scala/util.scala | 1 + 7 files changed, 7 insertions(+) diff --git a/junctions/src/main/scala/addrmap.scala b/junctions/src/main/scala/addrmap.scala index 8e6dea4e..d5e7292e 100644 --- a/junctions/src/main/scala/addrmap.scala +++ b/junctions/src/main/scala/addrmap.scala @@ -3,6 +3,7 
@@ package junctions import Chisel._ +import cde.{Parameters, Field} import scala.collection.mutable.HashMap case object PAddrBits extends Field[Int] diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 7a913dfe..f68ff2f1 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -1,6 +1,7 @@ package junctions import Chisel._ +import cde.{Parameters, Field} trait HastiConstants { diff --git a/junctions/src/main/scala/memserdes.scala b/junctions/src/main/scala/memserdes.scala index 2fc8ace1..c8bfd7b6 100644 --- a/junctions/src/main/scala/memserdes.scala +++ b/junctions/src/main/scala/memserdes.scala @@ -3,6 +3,7 @@ package junctions import Chisel._ import scala.math._ +import cde.{Parameters, Field} case object MIFAddrBits extends Field[Int] case object MIFDataBits extends Field[Int] diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 7d401655..0f4d8b7c 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -4,6 +4,7 @@ package junctions import Chisel._ import scala.math.max import scala.collection.mutable.ArraySeq +import cde.{Parameters, Field} case object NastiKey extends Field[NastiParameters] diff --git a/junctions/src/main/scala/poci.scala b/junctions/src/main/scala/poci.scala index bac75966..0ba4bb58 100644 --- a/junctions/src/main/scala/poci.scala +++ b/junctions/src/main/scala/poci.scala @@ -1,6 +1,7 @@ package junctions import Chisel._ +import cde.{Parameters, Field} abstract trait PociConstants { diff --git a/junctions/src/main/scala/smi.scala b/junctions/src/main/scala/smi.scala index 7d26deb7..96616b1e 100644 --- a/junctions/src/main/scala/smi.scala +++ b/junctions/src/main/scala/smi.scala @@ -1,6 +1,7 @@ package junctions import Chisel._ +import cde.Parameters class SMIReq(val dataWidth: Int, val addrWidth: Int) extends Bundle { val rw = Bool() diff --git a/junctions/src/main/scala/util.scala 
b/junctions/src/main/scala/util.scala index 9f59ba4f..16950cd1 100644 --- a/junctions/src/main/scala/util.scala +++ b/junctions/src/main/scala/util.scala @@ -1,6 +1,7 @@ /// See LICENSE for license details. package junctions import Chisel._ +import cde.Parameters object bigIntPow2 { def apply(in: BigInt): Boolean = in > 0 && ((in & (in-1)) == 0) From 8fe4917d8e074f18606dfa51b6eb4b0e51a33932 Mon Sep 17 00:00:00 2001 From: Jim Lawson Date: Thu, 22 Oct 2015 09:52:26 -0700 Subject: [PATCH 035/116] Add ability to generate libraryDependency on cde. --- junctions/build.sbt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/junctions/build.sbt b/junctions/build.sbt index cd79f1ee..bae39ed9 100644 --- a/junctions/build.sbt +++ b/junctions/build.sbt @@ -7,7 +7,7 @@ name := "junctions" scalaVersion := "2.11.6" // Provide a managed dependency on chisel if -DchiselVersion="" is supplied on the command line. -libraryDependencies ++= (Seq("chisel").map { +libraryDependencies ++= (Seq("chisel","cde").map { dep: String => sys.props.get(dep + "Version") map { "edu.berkeley.cs" %% dep % _ }}).flatten site.settings From 3270d17ad3f5b4434fe541c84c32cc8583af6c98 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Fri, 23 Oct 2015 16:25:17 -0700 Subject: [PATCH 036/116] add MultiChannel routing to Nasti interconnect generator --- junctions/src/main/scala/addrmap.scala | 14 ++- junctions/src/main/scala/nasti.scala | 127 +++++++++++++++---------- 2 files changed, 88 insertions(+), 53 deletions(-) diff --git a/junctions/src/main/scala/addrmap.scala b/junctions/src/main/scala/addrmap.scala index d5e7292e..e5c792a1 100644 --- a/junctions/src/main/scala/addrmap.scala +++ b/junctions/src/main/scala/addrmap.scala @@ -36,8 +36,8 @@ trait HasAddrMapParameters { abstract class MemRegion { def size: BigInt } case class MemSize(size: BigInt, prot: Int) extends MemRegion - case class MemSubmap(size: BigInt, entries: AddrMap) extends MemRegion +case class MemChannels(size: BigInt, nchannels: 
Int, prot: Int) extends MemRegion object AddrMapConsts { val R = 0x4 @@ -68,6 +68,7 @@ class AddrMap(entries: Seq[AddrMapEntry]) extends scala.collection.IndexedSeq[Ad this map { entry: AddrMapEntry => entry.region match { case MemSize(_, _) => 1 case MemSubmap(_, submap) => submap.countSlaves + case MemChannels(_, nchannels, _) => nchannels }} reduceLeft(_ + _) } } @@ -102,6 +103,17 @@ class AddrHashMap(addrmap: AddrMap) { ind += subpairs.size base += size } + // every channel gets the same base and size + case MemChannels(size, nchannels, prot) => { + if (!startOpt.isEmpty) base = startOpt.get + val subpairs = (0 until nchannels).map { i => + val chname = name + ":" + i.toString + (chname, AddrHashMapEntry(ind + i, base, size, prot)) + } + pairs = subpairs ++ pairs + ind += nchannels + base += size + } } } pairs diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 0f4d8b7c..5c3b2e22 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -363,45 +363,32 @@ class NastiErrorSlave(implicit p: Parameters) extends NastiModule { } /** Take a single Nasti master and route its requests to various slaves - * @param addrmap a sequence of base address + memory size pairs, - * on for each slave interface */ -class NastiRouter(addrmap: Seq[(BigInt, BigInt)])(implicit p: Parameters) extends NastiModule { - val nSlaves = addrmap.size + * @param nSlaves the number of slaves + * @param routeSel a function which takes an address and produces + * a one-hot encoded selection of the slave to write to */ +class NastiRouter(nSlaves: Int, routeSel: UInt => UInt)(implicit p: Parameters) + extends NastiModule { val io = new Bundle { val master = (new NastiIO).flip val slave = Vec(new NastiIO, nSlaves) } + val ar_route = routeSel(io.master.ar.bits.addr) + val aw_route = routeSel(io.master.aw.bits.addr) + var ar_ready = Bool(false) var aw_ready = Bool(false) var w_ready = Bool(false) - var r_valid_addr = 
Bool(false) - var w_valid_addr = Bool(false) - addrmap.zip(io.slave).zipWithIndex.foreach { case (((base, size), s), i) => - val bound = base + size - - require(bigIntPow2(size), - s"Region size $size is not a power of 2") - require(base % size == 0, - f"Region base address $base%x not divisible by $size%d" ) - - val ar_addr = io.master.ar.bits.addr - val ar_match = ar_addr >= UInt(base) && ar_addr < UInt(bound) - - s.ar.valid := io.master.ar.valid && ar_match + io.slave.zipWithIndex.foreach { case (s, i) => + s.ar.valid := io.master.ar.valid && ar_route(i) s.ar.bits := io.master.ar.bits - ar_ready = ar_ready || (s.ar.ready && ar_match) - r_valid_addr = r_valid_addr || ar_match + ar_ready = ar_ready || (s.ar.ready && ar_route(i)) - val aw_addr = io.master.aw.bits.addr - val aw_match = aw_addr >= UInt(base) && aw_addr < UInt(bound) - - s.aw.valid := io.master.aw.valid && aw_match + s.aw.valid := io.master.aw.valid && aw_route(i) s.aw.bits := io.master.aw.bits - aw_ready = aw_ready || (s.aw.ready && aw_match) - w_valid_addr = w_valid_addr || aw_match + aw_ready = aw_ready || (s.aw.ready && aw_route(i)) val chosen = Reg(init = Bool(false)) when (s.aw.fire()) { chosen := Bool(true) } @@ -412,16 +399,19 @@ class NastiRouter(addrmap: Seq[(BigInt, BigInt)])(implicit p: Parameters) extend w_ready = w_ready || (s.w.ready && chosen) } + val r_invalid = !ar_route.orR + val w_invalid = !aw_route.orR + val err_slave = Module(new NastiErrorSlave) - err_slave.io.ar.valid := !r_valid_addr && io.master.ar.valid + err_slave.io.ar.valid := r_invalid && io.master.ar.valid err_slave.io.ar.bits := io.master.ar.bits - err_slave.io.aw.valid := !w_valid_addr && io.master.aw.valid + err_slave.io.aw.valid := w_invalid && io.master.aw.valid err_slave.io.aw.bits := io.master.aw.bits err_slave.io.w.valid := io.master.w.valid err_slave.io.w.bits := io.master.w.bits - io.master.ar.ready := ar_ready || (!r_valid_addr && err_slave.io.ar.ready) - io.master.aw.ready := aw_ready || (!w_valid_addr && 
err_slave.io.aw.ready) + io.master.ar.ready := ar_ready || (r_invalid && err_slave.io.ar.ready) + io.master.aw.ready := aw_ready || (w_invalid && err_slave.io.aw.ready) io.master.w.ready := w_ready || err_slave.io.w.ready val b_arb = Module(new RRArbiter(new NastiWriteResponseChannel, nSlaves + 1)) @@ -445,25 +435,48 @@ class NastiRouter(addrmap: Seq[(BigInt, BigInt)])(implicit p: Parameters) extend /** Crossbar between multiple Nasti masters and slaves * @param nMasters the number of Nasti masters * @param nSlaves the number of Nasti slaves - * @param addrmap a sequence of base - size pairs; - * size of addrmap should be nSlaves */ -class NastiCrossbar(nMasters: Int, nSlaves: Int, addrmap: Seq[(BigInt, BigInt)]) + * @param routeSel a function selecting the slave to route an address to */ +class NastiCrossbar(nMasters: Int, nSlaves: Int, routeSel: UInt => UInt) (implicit p: Parameters) extends NastiModule { val io = new Bundle { val masters = Vec(new NastiIO, nMasters).flip val slaves = Vec(new NastiIO, nSlaves) } - val routers = Vec.fill(nMasters) { Module(new NastiRouter(addrmap)).io } - val arbiters = Vec.fill(nSlaves) { Module(new NastiArbiter(nMasters)).io } + if (nMasters == 1) { + val router = Module(new NastiRouter(nSlaves, routeSel)) + router.io.master <> io.masters.head + io.slaves <> router.io.slave + } else { + val routers = Vec.fill(nMasters) { Module(new NastiRouter(nSlaves, routeSel)).io } + val arbiters = Vec.fill(nSlaves) { Module(new NastiArbiter(nMasters)).io } - for (i <- 0 until nMasters) { - routers(i).master <> io.masters(i) + for (i <- 0 until nMasters) { + routers(i).master <> io.masters(i) + } + + for (i <- 0 until nSlaves) { + arbiters(i).master <> Vec(routers.map(r => r.slave(i))) + io.slaves(i) <> arbiters(i).slave + } } +} - for (i <- 0 until nSlaves) { - arbiters(i).master <> Vec(routers.map(r => r.slave(i))) - io.slaves(i) <> arbiters(i).slave +object NastiMultiChannelRouter { + def apply(master: NastiIO, nChannels: Int)(implicit p: 
Parameters): Vec[NastiIO] = { + require(isPow2(nChannels), "Number of channels must be power of 2") + if (nChannels == 1) { + Vec(master) + } else { + val dataBytes = p(MIFDataBits) * p(MIFDataBeats) / 8 + val selBits = log2Ceil(nChannels) + val routeSel = (addr: UInt) => { + Vec.tabulate(nChannels)(i => addr(selBits - 1, 0) === UInt(i)).toBits + } + val router = Module(new NastiRouter(nChannels, routeSel)) + router.io.master <> master + router.io.slave + } } } @@ -502,30 +515,40 @@ class NastiRecursiveInterconnect( lastEnd = start + size } - val flatSlaves = if (nMasters > 1) { - val xbar = Module(new NastiCrossbar(nMasters, levelSize, realAddrMap)) - xbar.io.masters <> io.masters - xbar.io.slaves - } else { - val router = Module(new NastiRouter(realAddrMap)) - router.io.master <> io.masters.head - router.io.slave + val routeSel = (addr: UInt) => { + Vec(realAddrMap.map { case (start, size) => + require(bigIntPow2(size), + s"Region size $size is not a power of 2") + require(base % size == 0, + f"Region base address $base%x not divisible by $size%d" ) + + addr >= UInt(start) && addr < UInt(start + size) + }).toBits } - addrmap.zip(realAddrMap).zipWithIndex.foreach { - case ((entry, (start, size)), i) => { + val xbar = Module(new NastiCrossbar(nMasters, levelSize, routeSel)) + xbar.io.masters <> io.masters + + addrmap.zip(realAddrMap).zip(xbar.io.slaves).zipWithIndex.foreach { + case (((entry, (start, size)), xbarSlave), i) => { entry.region match { case MemSize(_, _) => - io.slaves(slaveInd) <> flatSlaves(i) + io.slaves(slaveInd) <> xbarSlave slaveInd += 1 case MemSubmap(_, submap) => val subSlaves = submap.countSlaves val ic = Module(new NastiRecursiveInterconnect(1, subSlaves, submap, start)) - ic.io.masters.head <> flatSlaves(i) + ic.io.masters.head <> xbarSlave io.slaves.drop(slaveInd).take(subSlaves).zip(ic.io.slaves).foreach { case (s, m) => s <> m } slaveInd += subSlaves + case MemChannels(_, nchannels, _) => + val routerSlaves = 
NastiMultiChannelRouter(xbarSlave, nchannels) + io.slaves.drop(slaveInd).take(nchannels).zip(routerSlaves).foreach { + case (s, m) => s <> m + } + slaveInd += nchannels } } } From 5440d6c2ae8ec23b3c69ea2809d0b41ba85b6460 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Mon, 26 Oct 2015 12:23:03 -0700 Subject: [PATCH 037/116] balance MultiChannel router correctly --- junctions/src/main/scala/nasti.scala | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 5c3b2e22..df1a1bd1 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -469,9 +469,12 @@ object NastiMultiChannelRouter { Vec(master) } else { val dataBytes = p(MIFDataBits) * p(MIFDataBeats) / 8 + val selOffset = log2Up(dataBytes) val selBits = log2Ceil(nChannels) + // Consecutive blocks route to alternating channels val routeSel = (addr: UInt) => { - Vec.tabulate(nChannels)(i => addr(selBits - 1, 0) === UInt(i)).toBits + val sel = addr(selOffset + selBits - 1, selOffset) + Vec.tabulate(nChannels)(i => sel === UInt(i)).toBits } val router = Module(new NastiRouter(nChannels, routeSel)) router.io.master <> master From 096dbb3c2d226b87c033e2dd50458d342701d66f Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Mon, 26 Oct 2015 14:14:53 -0700 Subject: [PATCH 038/116] get rid of NastiTopInterconnect --- junctions/src/main/scala/nasti.scala | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index df1a1bd1..0e9a4ee8 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -556,20 +556,3 @@ class NastiRecursiveInterconnect( } } } - -class NastiTopInterconnect(val nMasters: Int, val nSlaves: Int, addrMap: AddrMap) - (implicit p: Parameters) extends NastiInterconnect { - val temp = Module(new NastiRecursiveInterconnect(nMasters, nSlaves, addrMap)) - - 
temp.io.masters.zip(io.masters).foreach { case (t, i) => - t.ar <> i.ar - t.aw <> i.aw - // this queue is necessary to break up the aw - w dependence - // introduced by the TileLink -> Nasti converter - t.w <> Queue(i.w) - i.b <> t.b - i.r <> t.r - } - //temp.io.masters <> io.masters - io.slaves <> temp.io.slaves -} From 3e906c8620e01e053a32ff94cbece4344103fb2c Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Mon, 2 Nov 2015 22:39:50 -0800 Subject: [PATCH 039/116] shave off channel select bits in MultiChannel router --- junctions/src/main/scala/nasti.scala | 50 +++++++++++++++++++++++++--- 1 file changed, 45 insertions(+), 5 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 0e9a4ee8..e3539a1d 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -464,7 +464,6 @@ class NastiCrossbar(nMasters: Int, nSlaves: Int, routeSel: UInt => UInt) object NastiMultiChannelRouter { def apply(master: NastiIO, nChannels: Int)(implicit p: Parameters): Vec[NastiIO] = { - require(isPow2(nChannels), "Number of channels must be power of 2") if (nChannels == 1) { Vec(master) } else { @@ -483,6 +482,47 @@ object NastiMultiChannelRouter { } } + +class NastiMultiChannelRouter(nChannels: Int) + (implicit p: Parameters) extends NastiModule { + val io = new Bundle { + val master = (new NastiIO).flip + val slaves = Vec(new NastiIO, nChannels) + } + + require(isPow2(nChannels), "Number of channels must be power of 2") + + if (nChannels == 1) { + io.slaves.head <> io.master + } else { + val dataBytes = p(MIFDataBits) * p(MIFDataBeats) / 8 + val selOffset = log2Up(dataBytes) + val selBits = log2Ceil(nChannels) + val blockOffset = selOffset + selBits + + // Consecutive blocks route to alternating channels + val routeSel = (addr: UInt) => { + val sel = addr(blockOffset - 1, selOffset) + Vec.tabulate(nChannels)(i => sel === UInt(i)).toBits + } + + val router = Module(new NastiRouter(nChannels, routeSel)) 
+ router.io.master <> io.master + + def cutSelectBits(addr: UInt): UInt = { + Cat(addr(nastiXAddrBits - 1, blockOffset), + addr(selOffset - 1, 0)) + } + + io.slaves.zip(router.io.slave).foreach { case (outer, inner) => + // Cut the selection bits out of the slave address channels + outer <> inner + outer.ar.bits.addr := cutSelectBits(inner.ar.bits.addr) + outer.aw.bits.addr := cutSelectBits(inner.aw.bits.addr) + } + } +} + class NastiInterconnectIO(val nMasters: Int, val nSlaves: Int) (implicit p: Parameters) extends Bundle { /* This is a bit confusing. The interconnect is a slave to the masters and @@ -547,10 +587,10 @@ class NastiRecursiveInterconnect( } slaveInd += subSlaves case MemChannels(_, nchannels, _) => - val routerSlaves = NastiMultiChannelRouter(xbarSlave, nchannels) - io.slaves.drop(slaveInd).take(nchannels).zip(routerSlaves).foreach { - case (s, m) => s <> m - } + val outChannels = Vec(io.slaves.drop(slaveInd).take(nchannels)) + val router = Module(new NastiMultiChannelRouter(nchannels)) + router.io.master <> xbarSlave + outChannels <> router.io.slaves slaveInd += nchannels } } From e7e281275a9ca685fc9c51bbe124e24bca1f3a47 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Wed, 18 Nov 2015 12:15:56 -0800 Subject: [PATCH 040/116] implement MultiChannel routing in a specialized (and more performant) way --- junctions/src/main/scala/nasti.scala | 144 ++++++++++++++++----------- 1 file changed, 88 insertions(+), 56 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index e3539a1d..cb589785 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -482,47 +482,6 @@ object NastiMultiChannelRouter { } } - -class NastiMultiChannelRouter(nChannels: Int) - (implicit p: Parameters) extends NastiModule { - val io = new Bundle { - val master = (new NastiIO).flip - val slaves = Vec(new NastiIO, nChannels) - } - - require(isPow2(nChannels), "Number of channels must be power of 2") 
- - if (nChannels == 1) { - io.slaves.head <> io.master - } else { - val dataBytes = p(MIFDataBits) * p(MIFDataBeats) / 8 - val selOffset = log2Up(dataBytes) - val selBits = log2Ceil(nChannels) - val blockOffset = selOffset + selBits - - // Consecutive blocks route to alternating channels - val routeSel = (addr: UInt) => { - val sel = addr(blockOffset - 1, selOffset) - Vec.tabulate(nChannels)(i => sel === UInt(i)).toBits - } - - val router = Module(new NastiRouter(nChannels, routeSel)) - router.io.master <> io.master - - def cutSelectBits(addr: UInt): UInt = { - Cat(addr(nastiXAddrBits - 1, blockOffset), - addr(selOffset - 1, 0)) - } - - io.slaves.zip(router.io.slave).foreach { case (outer, inner) => - // Cut the selection bits out of the slave address channels - outer <> inner - outer.ar.bits.addr := cutSelectBits(inner.ar.bits.addr) - outer.aw.bits.addr := cutSelectBits(inner.aw.bits.addr) - } - } -} - class NastiInterconnectIO(val nMasters: Int, val nSlaves: Int) (implicit p: Parameters) extends Bundle { /* This is a bit confusing. 
The interconnect is a slave to the masters and @@ -545,26 +504,26 @@ class NastiRecursiveInterconnect( val nSlaves: Int, addrmap: AddrMap, base: BigInt = 0) - (implicit p: Parameters) extends NastiInterconnect { + (implicit p: Parameters) extends NastiInterconnect()(p) { var lastEnd = base var slaveInd = 0 val levelSize = addrmap.size val realAddrMap = new ArraySeq[(BigInt, BigInt)](addrmap.size) - addrmap.zipWithIndex.foreach { case (AddrMapEntry(_, startOpt, region), i) => + addrmap.zipWithIndex.foreach { case (AddrMapEntry(name, startOpt, region), i) => val start = startOpt.getOrElse(lastEnd) val size = region.size realAddrMap(i) = (start, size) lastEnd = start + size + + require(bigIntPow2(size), + s"Region $name size $size is not a power of 2") + require(start % size == 0, + f"Region $name start address 0x$start%x not divisible by 0x$size%x" ) } val routeSel = (addr: UInt) => { Vec(realAddrMap.map { case (start, size) => - require(bigIntPow2(size), - s"Region size $size is not a power of 2") - require(base % size == 0, - f"Region base address $base%x not divisible by $size%d" ) - addr >= UInt(start) && addr < UInt(start + size) }).toBits } @@ -580,19 +539,92 @@ class NastiRecursiveInterconnect( slaveInd += 1 case MemSubmap(_, submap) => val subSlaves = submap.countSlaves + val outputs = Vec(io.slaves.drop(slaveInd).take(subSlaves)) val ic = Module(new NastiRecursiveInterconnect(1, subSlaves, submap, start)) ic.io.masters.head <> xbarSlave - io.slaves.drop(slaveInd).take(subSlaves).zip(ic.io.slaves).foreach { - case (s, m) => s <> m - } + outputs <> ic.io.slaves slaveInd += subSlaves case MemChannels(_, nchannels, _) => - val outChannels = Vec(io.slaves.drop(slaveInd).take(nchannels)) - val router = Module(new NastiMultiChannelRouter(nchannels)) - router.io.master <> xbarSlave - outChannels <> router.io.slaves - slaveInd += nchannels + require(nchannels == 1, "Recursive interconnect cannot handle MultiChannel interface") + io.slaves(slaveInd) <> xbarSlave + 
slaveInd += 1 } } } } + +class ChannelHelper(nChannels: Int) + (implicit val p: Parameters) extends HasNastiParameters { + + val dataBytes = p(MIFDataBits) * p(MIFDataBeats) / 8 + val chanSelBits = log2Ceil(nChannels) + val selOffset = log2Up(dataBytes) + val blockOffset = selOffset + chanSelBits + + def getSelect(addr: UInt) = + addr(blockOffset - 1, selOffset) + + def getAddr(addr: UInt) = + Cat(addr(nastiXAddrBits - 1, blockOffset), addr(selOffset - 1, 0)) +} + +/** NASTI interconnect for multi-channel memory + regular IO + * We do routing for the memory channels differently from the IO ports + * Routing memory banks onto memory channels is done via arbiters + * (N-to-1 correspondence between banks and channels) + * Routing extra NASTI masters to memory requires a channel selecting router + * Routing anything to IO just uses standard recursive interconnect + */ +class NastiPerformanceInterconnect( + nBanksPerChannel: Int, + nChannels: Int, + nExtraMasters: Int, + nExtraSlaves: Int, + addrmap: AddrMap)(implicit p: Parameters) extends NastiInterconnect()(p) { + + val nBanks = nBanksPerChannel * nChannels + val nMasters = nBanks + nExtraMasters + val nSlaves = nChannels + nExtraSlaves + + val split = addrmap.head.region.size + val iomap = new AddrMap(addrmap.tail) + + def routeMemOrIO(addr: UInt): UInt = { + Cat(addr >= UInt(split), addr < UInt(split)) + } + + val chanHelper = new ChannelHelper(nChannels) + + def connectChannel(outer: NastiIO, inner: NastiIO) { + outer <> inner + outer.ar.bits.addr := chanHelper.getAddr(inner.ar.bits.addr) + outer.aw.bits.addr := chanHelper.getAddr(inner.aw.bits.addr) + } + + val topRouters = List.fill(nMasters){Module(new NastiRouter(2, routeMemOrIO(_)))} + topRouters.zip(io.masters).foreach { + case (router, master) => router.io.master <> master + } + val channelRouteFunc = (addr: UInt) => UIntToOH(chanHelper.getSelect(addr)) + val channelXbar = Module(new NastiCrossbar(nExtraMasters, nChannels, channelRouteFunc)) + 
channelXbar.io.masters <> topRouters.drop(nBanks).map(_.io.slave(0)) + + for (i <- 0 until nChannels) { + /* Bank assignments to channels are strided so that consecutive banks + * map to different channels. That way, consecutive cache lines also + * map to different channels */ + val banks = (i until nBanks by nChannels).map(j => topRouters(j).io.slave(0)) + val extra = channelXbar.io.slaves(i) + + val channelArb = Module(new NastiArbiter(nBanksPerChannel + nExtraMasters)) + channelArb.io.master <> (banks :+ extra) + connectChannel(io.slaves(i), channelArb.io.slave) + } + + val ioslaves = Vec(io.slaves.drop(nChannels)) + val iomasters = topRouters.map(_.io.slave(1)) + val ioxbar = Module(new NastiRecursiveInterconnect( + nMasters, nExtraSlaves, iomap, split)) + ioxbar.io.masters <> iomasters + ioslaves <> ioxbar.io.slaves +} From 70835761561c832f0789d8354004099fdc56033b Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Thu, 26 Nov 2015 12:57:04 -0800 Subject: [PATCH 041/116] fix typo in NastiErrorSlave --- junctions/src/main/scala/nasti.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index cb589785..4be7255f 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -342,7 +342,7 @@ class NastiErrorSlave(implicit p: Parameters) extends NastiModule { when (beats_left === UInt(0)) { responding := Bool(false) } .otherwise { - beats_left := beats_left - UInt(0) + beats_left := beats_left - UInt(1) } } From c57639b23ff4f145b813ad51241daaf5797ddb57 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Sat, 5 Dec 2015 00:26:16 -0800 Subject: [PATCH 042/116] reverse order of RWX bits for compatibility --- junctions/src/main/scala/addrmap.scala | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/junctions/src/main/scala/addrmap.scala b/junctions/src/main/scala/addrmap.scala index e5c792a1..808ef5ee 100644 --- 
a/junctions/src/main/scala/addrmap.scala +++ b/junctions/src/main/scala/addrmap.scala @@ -40,18 +40,18 @@ case class MemSubmap(size: BigInt, entries: AddrMap) extends MemRegion case class MemChannels(size: BigInt, nchannels: Int, prot: Int) extends MemRegion object AddrMapConsts { - val R = 0x4 + val R = 0x1 val W = 0x2 - val X = 0x1 + val X = 0x4 val RW = R | W val RX = R | X val RWX = R | W | X } class AddrMapProt extends Bundle { - val r = Bool() - val w = Bool() val x = Bool() + val w = Bool() + val r = Bool() } case class AddrMapEntry(name: String, start: Option[BigInt], region: MemRegion) From 673f73b051d8b457280314b2ff977d8139821b3f Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Tue, 5 Jan 2016 20:04:49 -0800 Subject: [PATCH 043/116] add support for AXI streaming protocol --- junctions/src/main/scala/nasti.scala | 27 +++++++-- junctions/src/main/scala/stream.scala | 79 +++++++++++++++++++++++++++ 2 files changed, 101 insertions(+), 5 deletions(-) create mode 100644 junctions/src/main/scala/stream.scala diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 4be7255f..a8fb4586 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -113,14 +113,29 @@ class NastiReadDataChannel(implicit p: Parameters) extends NastiResponseChannel( val user = UInt(width = nastiRUserBits) } +object NastiConstants { + val BURST_FIXED = UInt("b00") + val BURST_INCR = UInt("b01") + val BURST_WRAP = UInt("b10") + + val RESP_OKAY = UInt("b00") + val RESP_EXOKAY = UInt("b01") + val RESP_SLVERR = UInt("b10") + val RESP_DECERR = UInt("b11") +} + +import NastiConstants._ + object NastiWriteAddressChannel { - def apply(id: UInt, addr: UInt, size: UInt, len: UInt = UInt(0))(implicit p: Parameters) = { + def apply(id: UInt, addr: UInt, size: UInt, + len: UInt = UInt(0), burst: UInt = BURST_INCR) + (implicit p: Parameters) = { val aw = Wire(new NastiWriteAddressChannel) aw.id := id aw.addr := addr aw.len := len 
aw.size := size - aw.burst := UInt("b01") + aw.burst := burst aw.lock := Bool(false) aw.cache := UInt("b0000") aw.prot := UInt("b000") @@ -132,13 +147,15 @@ object NastiWriteAddressChannel { } object NastiReadAddressChannel { - def apply(id: UInt, addr: UInt, size: UInt, len: UInt = UInt(0))(implicit p: Parameters) = { + def apply(id: UInt, addr: UInt, size: UInt, + len: UInt = UInt(0), burst: UInt = BURST_INCR) + (implicit p: Parameters) = { val ar = Wire(new NastiReadAddressChannel) ar.id := id ar.addr := addr ar.len := len ar.size := size - ar.burst := UInt("b01") + ar.burst := burst ar.lock := Bool(false) ar.cache := UInt(0) ar.prot := UInt(0) @@ -333,7 +350,7 @@ class NastiErrorSlave(implicit p: Parameters) extends NastiModule { io.r.valid := r_queue.io.deq.valid && responding io.r.bits.id := r_queue.io.deq.bits.id io.r.bits.data := UInt(0) - io.r.bits.resp := Bits("b11") + io.r.bits.resp := RESP_DECERR io.r.bits.last := beats_left === UInt(0) r_queue.io.deq.ready := io.r.fire() && io.r.bits.last diff --git a/junctions/src/main/scala/stream.scala b/junctions/src/main/scala/stream.scala new file mode 100644 index 00000000..e49746df --- /dev/null +++ b/junctions/src/main/scala/stream.scala @@ -0,0 +1,79 @@ +package junctions + +import Chisel._ +import NastiConstants._ +import cde.Parameters + +class StreamChannel(w: Int) extends Bundle { + val data = UInt(width = w) + val last = Bool() + + override def cloneType = new StreamChannel(w).asInstanceOf[this.type] +} + +class StreamIO(w: Int) extends Bundle { + val out = Decoupled(new StreamChannel(w)) + val in = Decoupled(new StreamChannel(w)).flip + + override def cloneType = new StreamIO(w).asInstanceOf[this.type] +} + +class NastiIOStreamIOConverter(w: Int)(implicit p: Parameters) extends Module { + val io = new Bundle { + val nasti = (new NastiIO).flip + val stream = new StreamIO(w) + } + + val streamSize = UInt(log2Up(w / 8)) + assert(!io.nasti.ar.valid || io.nasti.ar.bits.size === streamSize, + "read channel 
wrong size on stream") + assert(!io.nasti.ar.valid || io.nasti.ar.bits.burst === BURST_FIXED, + "read channel wrong burst type on stream") + assert(!io.nasti.aw.valid || io.nasti.aw.bits.size === streamSize, + "write channel wrong size on stream") + assert(!io.nasti.aw.valid || io.nasti.aw.bits.burst === BURST_FIXED, + "write channel wrong burst type on stream") + + val read_id = Reg(io.nasti.ar.bits.id) + val reading = Reg(init = Bool(false)) + + io.nasti.ar.ready := !reading + io.nasti.r.valid := reading && io.stream.in.valid + io.nasti.r.bits := io.stream.in.bits + io.nasti.r.bits.resp := UInt(0) + io.nasti.r.bits.id := read_id + io.stream.in.ready := reading && io.nasti.r.ready + + when (io.nasti.ar.fire()) { + read_id := io.nasti.ar.bits.id + reading := Bool(true) + } + + when (io.nasti.r.fire() && io.nasti.r.bits.last) { + reading := Bool(false) + } + + val write_id = Reg(io.nasti.aw.bits.id) + val writing = Reg(init = Bool(false)) + val write_resp = Reg(init = Bool(false)) + + io.nasti.aw.ready := !writing && !write_resp + io.nasti.w.ready := writing && io.stream.out.ready + io.stream.out.valid := writing && io.nasti.w.valid + io.stream.out.bits := io.nasti.w.bits + io.nasti.b.valid := write_resp + io.nasti.b.bits.resp := UInt(0) + io.nasti.b.bits.id := write_id + + when (io.nasti.aw.fire()) { + write_id := io.nasti.aw.bits.id + writing := Bool(true) + } + + when (io.nasti.w.fire() && io.nasti.w.bits.last) { + writing := Bool(false) + write_resp := Bool(true) + } + + when (io.nasti.b.fire()) { write_resp := Bool(false) } +} From 9d2637c2c7f26a07292b9189bba01a8e4ce9a2e6 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Thu, 7 Jan 2016 11:55:19 -0800 Subject: [PATCH 044/116] support empty submaps in interconnect generator --- junctions/src/main/scala/nasti.scala | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index a8fb4586..428a0837 100644 --- 
a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -555,12 +555,17 @@ class NastiRecursiveInterconnect( io.slaves(slaveInd) <> xbarSlave slaveInd += 1 case MemSubmap(_, submap) => - val subSlaves = submap.countSlaves - val outputs = Vec(io.slaves.drop(slaveInd).take(subSlaves)) - val ic = Module(new NastiRecursiveInterconnect(1, subSlaves, submap, start)) - ic.io.masters.head <> xbarSlave - outputs <> ic.io.slaves - slaveInd += subSlaves + if (submap.isEmpty) { + val err_slave = Module(new NastiErrorSlave) + err_slave.io <> xbarSlave + } else { + val subSlaves = submap.countSlaves + val outputs = Vec(io.slaves.drop(slaveInd).take(subSlaves)) + val ic = Module(new NastiRecursiveInterconnect(1, subSlaves, submap, start)) + ic.io.masters.head <> xbarSlave + outputs <> ic.io.slaves + slaveInd += subSlaves + } case MemChannels(_, nchannels, _) => require(nchannels == 1, "Recursive interconnect cannot handle MultiChannel interface") io.slaves(slaveInd) <> xbarSlave From 5d7b5b219f6be834980e0d34aa29a1cbd0feee3e Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Mon, 11 Jan 2016 16:18:38 -0800 Subject: [PATCH 045/116] lowercase SMI to Smi --- junctions/src/main/scala/smi.scala | 56 +++++++++++++++--------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/junctions/src/main/scala/smi.scala b/junctions/src/main/scala/smi.scala index 96616b1e..c7e27af2 100644 --- a/junctions/src/main/scala/smi.scala +++ b/junctions/src/main/scala/smi.scala @@ -3,35 +3,35 @@ package junctions import Chisel._ import cde.Parameters -class SMIReq(val dataWidth: Int, val addrWidth: Int) extends Bundle { +class SmiReq(val dataWidth: Int, val addrWidth: Int) extends Bundle { val rw = Bool() val addr = UInt(width = addrWidth) val data = Bits(width = dataWidth) override def cloneType = - new SMIReq(dataWidth, addrWidth).asInstanceOf[this.type] + new SmiReq(dataWidth, addrWidth).asInstanceOf[this.type] } /** Simple Memory Interface IO. 
Used to communicate with PCR and SCR * @param dataWidth the width in bits of the data field * @param addrWidth the width in bits of the addr field */ -class SMIIO(val dataWidth: Int, val addrWidth: Int) extends Bundle { - val req = Decoupled(new SMIReq(dataWidth, addrWidth)) +class SmiIO(val dataWidth: Int, val addrWidth: Int) extends Bundle { + val req = Decoupled(new SmiReq(dataWidth, addrWidth)) val resp = Decoupled(Bits(width = dataWidth)).flip override def cloneType = - new SMIIO(dataWidth, addrWidth).asInstanceOf[this.type] + new SmiIO(dataWidth, addrWidth).asInstanceOf[this.type] } -abstract class SMIPeripheral extends Module { +abstract class SmiPeripheral extends Module { val dataWidth: Int val addrWidth: Int - lazy val io = new SMIIO(dataWidth, addrWidth).flip + lazy val io = new SmiIO(dataWidth, addrWidth).flip } -/** A simple sequential memory accessed through SMI */ -class SMIMem(val dataWidth: Int, val memDepth: Int) extends SMIPeripheral { +/** A simple sequential memory accessed through Smi */ +class SmiMem(val dataWidth: Int, val memDepth: Int) extends SmiPeripheral { // override val addrWidth = log2Up(memDepth) @@ -52,21 +52,21 @@ class SMIMem(val dataWidth: Int, val memDepth: Int) extends SMIPeripheral { io.req.ready := !resp_valid } -/** Arbitrate among several SMI clients +/** Arbitrate among several Smi clients * @param n the number of clients - * @param dataWidth SMI data width - * @param addrWidth SMI address width */ -class SMIArbiter(val n: Int, val dataWidth: Int, val addrWidth: Int) + * @param dataWidth Smi data width + * @param addrWidth Smi address width */ +class SmiArbiter(val n: Int, val dataWidth: Int, val addrWidth: Int) extends Module { val io = new Bundle { - val in = Vec(new SMIIO(dataWidth, addrWidth), n).flip - val out = new SMIIO(dataWidth, addrWidth) + val in = Vec(new SmiIO(dataWidth, addrWidth), n).flip + val out = new SmiIO(dataWidth, addrWidth) } val wait_resp = Reg(init = Bool(false)) val choice = Reg(UInt(width = 
log2Up(n))) - val req_arb = Module(new RRArbiter(new SMIReq(dataWidth, addrWidth), n)) + val req_arb = Module(new RRArbiter(new SmiReq(dataWidth, addrWidth), n)) req_arb.io.in <> io.in.map(_.req) req_arb.io.out.ready := io.out.req.ready && !wait_resp @@ -88,12 +88,12 @@ class SMIArbiter(val n: Int, val dataWidth: Int, val addrWidth: Int) io.out.resp.ready := io.in(choice).resp.ready } -class SMIIONastiReadIOConverter(val dataWidth: Int, val addrWidth: Int) +class SmiIONastiReadIOConverter(val dataWidth: Int, val addrWidth: Int) (implicit p: Parameters) extends NastiModule()(p) { val io = new Bundle { val ar = Decoupled(new NastiReadAddressChannel).flip val r = Decoupled(new NastiReadDataChannel) - val smi = new SMIIO(dataWidth, addrWidth) + val smi = new SmiIO(dataWidth, addrWidth) } private val maxWordsPerBeat = nastiXDataBits / dataWidth @@ -170,13 +170,13 @@ class SMIIONastiReadIOConverter(val dataWidth: Int, val addrWidth: Int) } } -class SMIIONastiWriteIOConverter(val dataWidth: Int, val addrWidth: Int) +class SmiIONastiWriteIOConverter(val dataWidth: Int, val addrWidth: Int) (implicit p: Parameters) extends NastiModule()(p) { val io = new Bundle { val aw = Decoupled(new NastiWriteAddressChannel).flip val w = Decoupled(new NastiWriteDataChannel).flip val b = Decoupled(new NastiWriteResponseChannel) - val smi = new SMIIO(dataWidth, addrWidth) + val smi = new SmiIO(dataWidth, addrWidth) } private val dataBytes = dataWidth / 8 @@ -185,7 +185,7 @@ class SMIIONastiWriteIOConverter(val dataWidth: Int, val addrWidth: Int) private val addrOffBits = addrWidth + byteOffBits assert(!io.aw.valid || io.aw.bits.size >= UInt(byteOffBits), - "Nasti size must be >= SMI size") + "Nasti size must be >= Smi size") val id = Reg(UInt(width = nastiWIdBits)) val addr = Reg(UInt(width = addrWidth)) @@ -251,26 +251,26 @@ class SMIIONastiWriteIOConverter(val dataWidth: Int, val addrWidth: Int) when (io.b.fire()) { state := s_idle } } -/** Convert Nasti protocol to SMI protocol */ -class 
SMIIONastiIOConverter(val dataWidth: Int, val addrWidth: Int) +/** Convert Nasti protocol to Smi protocol */ +class SmiIONastiIOConverter(val dataWidth: Int, val addrWidth: Int) (implicit p: Parameters) extends NastiModule()(p) { val io = new Bundle { val nasti = (new NastiIO).flip - val smi = new SMIIO(dataWidth, addrWidth) + val smi = new SmiIO(dataWidth, addrWidth) } - require(isPow2(dataWidth), "SMI data width must be power of 2") + require(isPow2(dataWidth), "Smi data width must be power of 2") - val reader = Module(new SMIIONastiReadIOConverter(dataWidth, addrWidth)) + val reader = Module(new SmiIONastiReadIOConverter(dataWidth, addrWidth)) reader.io.ar <> io.nasti.ar io.nasti.r <> reader.io.r - val writer = Module(new SMIIONastiWriteIOConverter(dataWidth, addrWidth)) + val writer = Module(new SmiIONastiWriteIOConverter(dataWidth, addrWidth)) writer.io.aw <> io.nasti.aw writer.io.w <> io.nasti.w io.nasti.b <> writer.io.b - val arb = Module(new SMIArbiter(2, dataWidth, addrWidth)) + val arb = Module(new SmiArbiter(2, dataWidth, addrWidth)) arb.io.in(0) <> reader.io.smi arb.io.in(1) <> writer.io.smi io.smi <> arb.io.out From c8fa7c43a9a3d11decdaeb02d8af5d103a54f9ef Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Thu, 14 Jan 2016 13:38:00 -0800 Subject: [PATCH 046/116] fix Chisel3 deprecation warnings --- junctions/src/main/scala/hasti.scala | 8 ++++---- junctions/src/main/scala/memserdes.scala | 2 +- junctions/src/main/scala/nasti.scala | 12 ++++++------ junctions/src/main/scala/poci.scala | 4 ++-- junctions/src/main/scala/smi.scala | 2 +- junctions/src/main/scala/util.scala | 6 +++--- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index f68ff2f1..fb38d1db 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -79,7 +79,7 @@ class HastiSlaveIO(implicit p: Parameters) extends HastiBundle()(p) { class HastiBus(amap: 
Seq[UInt=>Bool])(implicit p: Parameters) extends HastiModule()(p) { val io = new Bundle { val master = new HastiMasterIO().flip - val slaves = Vec(new HastiSlaveIO, amap.size).flip + val slaves = Vec(amap.size, new HastiSlaveIO).flip } // skid buffer @@ -149,7 +149,7 @@ class HastiBus(amap: Seq[UInt=>Bool])(implicit p: Parameters) extends HastiModul class HastiSlaveMux(n: Int)(implicit p: Parameters) extends HastiModule()(p) { val io = new Bundle { - val ins = Vec(new HastiSlaveIO, n) + val ins = Vec(n, new HastiSlaveIO) val out = new HastiSlaveIO().flip } @@ -219,8 +219,8 @@ class HastiSlaveMux(n: Int)(implicit p: Parameters) extends HastiModule()(p) { class HastiXbar(nMasters: Int, addressMap: Seq[UInt=>Bool]) (implicit p: Parameters) extends HastiModule()(p) { val io = new Bundle { - val masters = Vec(new HastiMasterIO, nMasters).flip - val slaves = Vec(new HastiSlaveIO, addressMap.size).flip + val masters = Vec(nMasters, new HastiMasterIO).flip + val slaves = Vec(addressMap.size, new HastiSlaveIO).flip } val buses = List.fill(nMasters){Module(new HastiBus(addressMap))} diff --git a/junctions/src/main/scala/memserdes.scala b/junctions/src/main/scala/memserdes.scala index c8bfd7b6..52abf3c3 100644 --- a/junctions/src/main/scala/memserdes.scala +++ b/junctions/src/main/scala/memserdes.scala @@ -208,7 +208,7 @@ class MemDesser(w: Int)(implicit p: Parameters) extends Module // test rig side class MemIOArbiter(val arbN: Int)(implicit p: Parameters) extends MIFModule { val io = new Bundle { - val inner = Vec(new MemIO, arbN).flip + val inner = Vec(arbN, new MemIO).flip val outer = new MemIO } diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 428a0837..6624b702 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -263,7 +263,7 @@ class MemIONastiIOConverter(cacheBlockOffsetBits: Int)(implicit p: Parameters) e /** Arbitrate among arbN masters requesting to a single slave */ class 
NastiArbiter(val arbN: Int)(implicit p: Parameters) extends NastiModule { val io = new Bundle { - val master = Vec(new NastiIO, arbN).flip + val master = Vec(arbN, new NastiIO).flip val slave = new NastiIO } @@ -388,7 +388,7 @@ class NastiRouter(nSlaves: Int, routeSel: UInt => UInt)(implicit p: Parameters) val io = new Bundle { val master = (new NastiIO).flip - val slave = Vec(new NastiIO, nSlaves) + val slave = Vec(nSlaves, new NastiIO) } val ar_route = routeSel(io.master.ar.bits.addr) @@ -456,8 +456,8 @@ class NastiRouter(nSlaves: Int, routeSel: UInt => UInt)(implicit p: Parameters) class NastiCrossbar(nMasters: Int, nSlaves: Int, routeSel: UInt => UInt) (implicit p: Parameters) extends NastiModule { val io = new Bundle { - val masters = Vec(new NastiIO, nMasters).flip - val slaves = Vec(new NastiIO, nSlaves) + val masters = Vec(nMasters, new NastiIO).flip + val slaves = Vec(nSlaves, new NastiIO) } if (nMasters == 1) { @@ -503,8 +503,8 @@ class NastiInterconnectIO(val nMasters: Int, val nSlaves: Int) (implicit p: Parameters) extends Bundle { /* This is a bit confusing. The interconnect is a slave to the masters and * a master to the slaves. Hence why the declarations seem to be backwards. 
*/ - val masters = Vec(new NastiIO, nMasters).flip - val slaves = Vec(new NastiIO, nSlaves) + val masters = Vec(nMasters, new NastiIO).flip + val slaves = Vec(nSlaves, new NastiIO) override def cloneType = new NastiInterconnectIO(nMasters, nSlaves).asInstanceOf[this.type] } diff --git a/junctions/src/main/scala/poci.scala b/junctions/src/main/scala/poci.scala index 0ba4bb58..01993fee 100644 --- a/junctions/src/main/scala/poci.scala +++ b/junctions/src/main/scala/poci.scala @@ -54,7 +54,7 @@ class HastiToPociBridge(implicit p: Parameters) extends HastiModule()(p) { io.out.paddr := haddr_reg io.out.pwrite := hwrite_reg(0) - io.out.psel := (state != s_idle) + io.out.psel := (state =/= s_idle) io.out.penable := (state === s_access) io.out.pwdata := io.in.hwdata io.in.hrdata := io.out.prdata @@ -66,7 +66,7 @@ class PociBus(amap: Seq[UInt=>Bool]) extends Module { val io = new Bundle { val master = new PociIO().flip - val slaves = Vec(new PociIO, amap.size) + val slaves = Vec(amap.size, new PociIO) } val psels = PriorityEncoderOH( diff --git a/junctions/src/main/scala/smi.scala b/junctions/src/main/scala/smi.scala index c7e27af2..cc314644 100644 --- a/junctions/src/main/scala/smi.scala +++ b/junctions/src/main/scala/smi.scala @@ -59,7 +59,7 @@ class SmiMem(val dataWidth: Int, val memDepth: Int) extends SmiPeripheral { class SmiArbiter(val n: Int, val dataWidth: Int, val addrWidth: Int) extends Module { val io = new Bundle { - val in = Vec(new SmiIO(dataWidth, addrWidth), n).flip + val in = Vec(n, new SmiIO(dataWidth, addrWidth)).flip val out = new SmiIO(dataWidth, addrWidth) } diff --git a/junctions/src/main/scala/util.scala b/junctions/src/main/scala/util.scala index 16950cd1..d4373164 100644 --- a/junctions/src/main/scala/util.scala +++ b/junctions/src/main/scala/util.scala @@ -22,7 +22,7 @@ class HellaFlowQueue[T <: Data](val entries: Int)(data: => T) extends Module { val maybe_full = Reg(init=Bool(false)) val enq_ptr = Counter(do_enq, entries)._1 val (deq_ptr, 
deq_done) = Counter(do_deq, entries) - when (do_enq != do_deq) { maybe_full := do_enq } + when (do_enq =/= do_deq) { maybe_full := do_enq } val ptr_match = enq_ptr === deq_ptr val empty = ptr_match && !maybe_full @@ -30,7 +30,7 @@ class HellaFlowQueue[T <: Data](val entries: Int)(data: => T) extends Module { val atLeastTwo = full || enq_ptr - deq_ptr >= UInt(2) do_flow := empty && io.deq.ready - val ram = SeqMem(data, entries) + val ram = SeqMem(entries, data) when (do_enq) { ram.write(enq_ptr, io.enq.bits) } val ren = io.deq.ready && (atLeastTwo || !io.deq.valid && !empty) @@ -66,7 +66,7 @@ abstract class JunctionsAbstractLockingArbiter[T <: Data](typ: T, arbN: Int) extends Module { val io = new Bundle { - val in = Vec(Decoupled(typ.cloneType), arbN).flip + val in = Vec(arbN, Decoupled(typ.cloneType)).flip val out = Decoupled(typ.cloneType) } From 335fb7312063f04c11a736fa321397f9eca4976c Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Fri, 15 Jan 2016 15:16:54 -0800 Subject: [PATCH 047/116] Chisel3 compatibility fix No need for a Vec here. 
--- junctions/src/main/scala/nasti.scala | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 6624b702..0baa463d 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -560,10 +560,11 @@ class NastiRecursiveInterconnect( err_slave.io <> xbarSlave } else { val subSlaves = submap.countSlaves - val outputs = Vec(io.slaves.drop(slaveInd).take(subSlaves)) + val outputs = io.slaves.drop(slaveInd).take(subSlaves) val ic = Module(new NastiRecursiveInterconnect(1, subSlaves, submap, start)) ic.io.masters.head <> xbarSlave - outputs <> ic.io.slaves + for ((o, s) <- outputs zip ic.io.slaves) + o <> s slaveInd += subSlaves } case MemChannels(_, nchannels, _) => From 6a0352c6d00eda11e4a16f138fe29fb2352dbe8d Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Thu, 14 Jan 2016 16:41:22 -0800 Subject: [PATCH 048/116] fix up SmiMem --- junctions/src/main/scala/smi.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/junctions/src/main/scala/smi.scala b/junctions/src/main/scala/smi.scala index cc314644..b829abb3 100644 --- a/junctions/src/main/scala/smi.scala +++ b/junctions/src/main/scala/smi.scala @@ -35,7 +35,7 @@ class SmiMem(val dataWidth: Int, val memDepth: Int) extends SmiPeripheral { // override val addrWidth = log2Up(memDepth) - val mem = SeqMem(Bits(width = dataWidth), memDepth) + val mem = SeqMem(memDepth, Bits(width = dataWidth)) val ren = io.req.fire() && !io.req.bits.rw val wen = io.req.fire() && io.req.bits.rw From 19c526de598830836f8fa57b2cb60152f4766995 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Thu, 21 Jan 2016 15:37:07 -0800 Subject: [PATCH 049/116] move ReorderQueue and DecoupledHelper in from uncore --- junctions/src/main/scala/util.scala | 62 +++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/junctions/src/main/scala/util.scala b/junctions/src/main/scala/util.scala index 
d4373164..c1559f5f 100644 --- a/junctions/src/main/scala/util.scala +++ b/junctions/src/main/scala/util.scala @@ -143,3 +143,65 @@ class JunctionsCountingArbiter[T <: Data]( } } } + +class ReorderQueueWrite[T <: Data](dType: T, tagWidth: Int) extends Bundle { + val data = dType.cloneType + val tag = UInt(width = tagWidth) + + override def cloneType = + new ReorderQueueWrite(dType, tagWidth).asInstanceOf[this.type] +} + +class ReorderEnqueueIO[T <: Data](dType: T, tagWidth: Int) + extends DecoupledIO(new ReorderQueueWrite(dType, tagWidth)) + +class ReorderDequeueIO[T <: Data](dType: T, tagWidth: Int) extends Bundle { + val valid = Bool(INPUT) + val tag = UInt(INPUT, tagWidth) + val data = dType.cloneType.asOutput + val matches = Bool(OUTPUT) + + override def cloneType = + new ReorderDequeueIO(dType, tagWidth).asInstanceOf[this.type] +} + +class ReorderQueue[T <: Data](dType: T, tagWidth: Int, size: Int) + extends Module { + val io = new Bundle { + val enq = new ReorderEnqueueIO(dType, tagWidth).flip + val deq = new ReorderDequeueIO(dType, tagWidth) + } + + val roq_data = Reg(Vec(size, dType.cloneType)) + val roq_tags = Reg(Vec(size, UInt(width = tagWidth))) + val roq_free = Reg(init = Vec.fill(size)(Bool(true))) + + val roq_enq_addr = PriorityEncoder(roq_free) + val roq_matches = roq_tags.zip(roq_free) + .map { case (tag, free) => tag === io.deq.tag && !free } + val roq_deq_addr = PriorityEncoder(roq_matches) + + io.enq.ready := roq_free.reduce(_ || _) + io.deq.data := roq_data(roq_deq_addr) + io.deq.matches := roq_matches.reduce(_ || _) + + when (io.enq.valid && io.enq.ready) { + roq_data(roq_enq_addr) := io.enq.bits.data + roq_tags(roq_enq_addr) := io.enq.bits.tag + roq_free(roq_enq_addr) := Bool(false) + } + + when (io.deq.valid) { + roq_free(roq_deq_addr) := Bool(true) + } +} + +object DecoupledHelper { + def apply(rvs: Bool*) = new DecoupledHelper(rvs) +} + +class DecoupledHelper(val rvs: Seq[Bool]) { + def fire(exclude: Bool, includes: Bool*) = { + 
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) + } +} From a56a502ced9f358f720269fcd8f6c5aa2cb9318d Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Sat, 23 Jan 2016 21:36:16 -0800 Subject: [PATCH 050/116] Add missing cloneType method @zhemao --- junctions/src/main/scala/util.scala | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/junctions/src/main/scala/util.scala b/junctions/src/main/scala/util.scala index c1559f5f..865d63b5 100644 --- a/junctions/src/main/scala/util.scala +++ b/junctions/src/main/scala/util.scala @@ -153,7 +153,11 @@ class ReorderQueueWrite[T <: Data](dType: T, tagWidth: Int) extends Bundle { } class ReorderEnqueueIO[T <: Data](dType: T, tagWidth: Int) - extends DecoupledIO(new ReorderQueueWrite(dType, tagWidth)) + extends DecoupledIO(new ReorderQueueWrite(dType, tagWidth)) { + + override def cloneType = + new ReorderEnqueueIO(dType, tagWidth).asInstanceOf[this.type] +} class ReorderDequeueIO[T <: Data](dType: T, tagWidth: Int) extends Bundle { val valid = Bool(INPUT) From bfdf5a538aab01d9bbea7f33af4402c214df179a Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Thu, 14 Jan 2016 16:41:32 -0800 Subject: [PATCH 051/116] Separate memory interconnect from IO interconnect. Since we're separating memory and MMIO traffic in the L1 to L2 network, we won't need to route between memory and MMIO at the AXI interconnect. This means we can have separate (and simpler) AXI interconnects for each. One consequence of this is that the starting address of the IO interconnect can no longer be assumed to be 0 by default. 
--- junctions/src/main/scala/addrmap.scala | 42 +++++------- junctions/src/main/scala/nasti.scala | 93 ++++++-------------------- 2 files changed, 35 insertions(+), 100 deletions(-) diff --git a/junctions/src/main/scala/addrmap.scala b/junctions/src/main/scala/addrmap.scala index 808ef5ee..5fa34483 100644 --- a/junctions/src/main/scala/addrmap.scala +++ b/junctions/src/main/scala/addrmap.scala @@ -30,14 +30,13 @@ trait HasAddrMapParameters { val pgLevelBits = p(PgLevelBits) val asIdBits = p(ASIdBits) - val addrMap = new AddrHashMap(p(GlobalAddrMap)) + val addrMap = new AddrHashMap(p(GlobalAddrMap), p(MMIOBase)) } abstract class MemRegion { def size: BigInt } case class MemSize(size: BigInt, prot: Int) extends MemRegion case class MemSubmap(size: BigInt, entries: AddrMap) extends MemRegion -case class MemChannels(size: BigInt, nchannels: Int, prot: Int) extends MemRegion object AddrMapConsts { val R = 0x1 @@ -68,7 +67,6 @@ class AddrMap(entries: Seq[AddrMapEntry]) extends scala.collection.IndexedSeq[Ad this map { entry: AddrMapEntry => entry.region match { case MemSize(_, _) => 1 case MemSubmap(_, submap) => submap.countSlaves - case MemChannels(_, nchannels, _) => nchannels }} reduceLeft(_ + _) } } @@ -77,12 +75,12 @@ object AddrMap { def apply(elems: AddrMapEntry*): AddrMap = new AddrMap(elems) } -class AddrHashMap(addrmap: AddrMap) { +class AddrHashMap(addrmap: AddrMap, start: BigInt) { val mapping = new HashMap[String, AddrHashMapEntry] - private def genPairs(am: AddrMap): Seq[(String, AddrHashMapEntry)] = { + private def genPairs(am: AddrMap, start: BigInt): Seq[(String, AddrHashMapEntry)] = { var ind = 0 - var base = BigInt(0) + var base = start var pairs = Seq[(String, AddrHashMapEntry)]() am.foreach { case AddrMapEntry(name, startOpt, region) => region match { @@ -94,32 +92,21 @@ class AddrHashMap(addrmap: AddrMap) { } case MemSubmap(size, submap) => { if (!startOpt.isEmpty) base = startOpt.get - val subpairs = genPairs(submap).map { + val subpairs = 
genPairs(submap, base).map { case (subname, AddrHashMapEntry(subind, subbase, subsize, prot)) => (name + ":" + subname, - AddrHashMapEntry(ind + subind, base + subbase, subsize, prot)) + AddrHashMapEntry(ind + subind, subbase, subsize, prot)) } pairs = subpairs ++ pairs ind += subpairs.size base += size } - // every channel gets the same base and size - case MemChannels(size, nchannels, prot) => { - if (!startOpt.isEmpty) base = startOpt.get - val subpairs = (0 until nchannels).map { i => - val chname = name + ":" + i.toString - (chname, AddrHashMapEntry(ind + i, base, size, prot)) - } - pairs = subpairs ++ pairs - ind += nchannels - base += size - } } } pairs } - for ((name, ind) <- genPairs(addrmap)) { mapping(name) = ind } + for ((name, ind) <- genPairs(addrmap, start)) { mapping(name) = ind } def nEntries: Int = mapping.size def apply(name: String): AddrHashMapEntry = mapping(name) @@ -133,15 +120,18 @@ class AddrHashMap(addrmap: AddrMap) { } def isValid(addr: UInt): Bool = { - sortedEntries().map { case (_, base, size, _) => - addr >= UInt(base) && addr < UInt(base + size) + addr < UInt(start) || sortedEntries().map { + case (_, base, size, _) => + addr >= UInt(base) && addr < UInt(base + size) }.reduceLeft(_ || _) } def getProt(addr: UInt): AddrMapProt = { - Mux1H(sortedEntries().map { case (_, base, size, prot) => - (addr >= UInt(base) && addr < UInt(base + size), - new AddrMapProt().fromBits(Bits(prot, 3))) - }) + val protBits = Mux(addr < UInt(start), + Bits(AddrMapConsts.RWX, 3), + Mux1H(sortedEntries().map { case (_, base, size, prot) => + (addr >= UInt(base) && addr < UInt(base + size), Bits(prot, 3)) + })) + new AddrMapProt().fromBits(protBits) } } diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 0baa463d..579ef6f4 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -479,26 +479,6 @@ class NastiCrossbar(nMasters: Int, nSlaves: Int, routeSel: UInt => UInt) } } 
-object NastiMultiChannelRouter { - def apply(master: NastiIO, nChannels: Int)(implicit p: Parameters): Vec[NastiIO] = { - if (nChannels == 1) { - Vec(master) - } else { - val dataBytes = p(MIFDataBits) * p(MIFDataBeats) / 8 - val selOffset = log2Up(dataBytes) - val selBits = log2Ceil(nChannels) - // Consecutive blocks route to alternating channels - val routeSel = (addr: UInt) => { - val sel = addr(selOffset + selBits - 1, selOffset) - Vec.tabulate(nChannels)(i => sel === UInt(i)).toBits - } - val router = Module(new NastiRouter(nChannels, routeSel)) - router.io.master <> master - router.io.slave - } - } -} - class NastiInterconnectIO(val nMasters: Int, val nSlaves: Int) (implicit p: Parameters) extends Bundle { /* This is a bit confusing. The interconnect is a slave to the masters and @@ -517,10 +497,8 @@ abstract class NastiInterconnect(implicit p: Parameters) extends NastiModule()(p } class NastiRecursiveInterconnect( - val nMasters: Int, - val nSlaves: Int, - addrmap: AddrMap, - base: BigInt = 0) + val nMasters: Int, val nSlaves: Int, + addrmap: AddrMap, base: BigInt) (implicit p: Parameters) extends NastiInterconnect()(p) { var lastEnd = base var slaveInd = 0 @@ -530,13 +508,16 @@ class NastiRecursiveInterconnect( addrmap.zipWithIndex.foreach { case (AddrMapEntry(name, startOpt, region), i) => val start = startOpt.getOrElse(lastEnd) val size = region.size - realAddrMap(i) = (start, size) - lastEnd = start + size require(bigIntPow2(size), s"Region $name size $size is not a power of 2") require(start % size == 0, f"Region $name start address 0x$start%x not divisible by 0x$size%x" ) + require(start >= lastEnd, + f"Region $name start address 0x$start%x before previous region end") + + realAddrMap(i) = (start, size) + lastEnd = start + size } val routeSel = (addr: UInt) => { @@ -567,10 +548,6 @@ class NastiRecursiveInterconnect( o <> s slaveInd += subSlaves } - case MemChannels(_, nchannels, _) => - require(nchannels == 1, "Recursive interconnect cannot handle 
MultiChannel interface") - io.slaves(slaveInd) <> xbarSlave - slaveInd += 1 } } } @@ -585,69 +562,37 @@ class ChannelHelper(nChannels: Int) val blockOffset = selOffset + chanSelBits def getSelect(addr: UInt) = - addr(blockOffset - 1, selOffset) + if (nChannels > 1) addr(blockOffset - 1, selOffset) else UInt(0) def getAddr(addr: UInt) = - Cat(addr(nastiXAddrBits - 1, blockOffset), addr(selOffset - 1, 0)) + if (nChannels > 1) + Cat(addr(nastiXAddrBits - 1, blockOffset), addr(selOffset - 1, 0)) + else addr } -/** NASTI interconnect for multi-channel memory + regular IO - * We do routing for the memory channels differently from the IO ports - * Routing memory banks onto memory channels is done via arbiters - * (N-to-1 correspondence between banks and channels) - * Routing extra NASTI masters to memory requires a channel selecting router - * Routing anything to IO just uses standard recursive interconnect - */ -class NastiPerformanceInterconnect( - nBanksPerChannel: Int, - nChannels: Int, - nExtraMasters: Int, - nExtraSlaves: Int, - addrmap: AddrMap)(implicit p: Parameters) extends NastiInterconnect()(p) { +class NastiMemoryInterconnect( + nBanksPerChannel: Int, nChannels: Int) + (implicit p: Parameters) extends NastiInterconnect()(p) { val nBanks = nBanksPerChannel * nChannels - val nMasters = nBanks + nExtraMasters - val nSlaves = nChannels + nExtraSlaves - - val split = addrmap.head.region.size - val iomap = new AddrMap(addrmap.tail) - - def routeMemOrIO(addr: UInt): UInt = { - Cat(addr >= UInt(split), addr < UInt(split)) - } + val nMasters = nBanks + val nSlaves = nChannels val chanHelper = new ChannelHelper(nChannels) - def connectChannel(outer: NastiIO, inner: NastiIO) { outer <> inner outer.ar.bits.addr := chanHelper.getAddr(inner.ar.bits.addr) outer.aw.bits.addr := chanHelper.getAddr(inner.aw.bits.addr) } - val topRouters = List.fill(nMasters){Module(new NastiRouter(2, routeMemOrIO(_)))} - topRouters.zip(io.masters).foreach { - case (router, master) => 
router.io.master <> master - } - val channelRouteFunc = (addr: UInt) => UIntToOH(chanHelper.getSelect(addr)) - val channelXbar = Module(new NastiCrossbar(nExtraMasters, nChannels, channelRouteFunc)) - channelXbar.io.masters <> topRouters.drop(nBanks).map(_.io.slave(0)) - for (i <- 0 until nChannels) { /* Bank assignments to channels are strided so that consecutive banks * map to different channels. That way, consecutive cache lines also * map to different channels */ - val banks = (i until nBanks by nChannels).map(j => topRouters(j).io.slave(0)) - val extra = channelXbar.io.slaves(i) + val banks = (i until nBanks by nChannels).map(j => io.masters(j)) - val channelArb = Module(new NastiArbiter(nBanksPerChannel + nExtraMasters)) - channelArb.io.master <> (banks :+ extra) + val channelArb = Module(new NastiArbiter(nBanksPerChannel)) + channelArb.io.master <> banks connectChannel(io.slaves(i), channelArb.io.slave) } - - val ioslaves = Vec(io.slaves.drop(nChannels)) - val iomasters = topRouters.map(_.io.slave(1)) - val ioxbar = Module(new NastiRecursiveInterconnect( - nMasters, nExtraSlaves, iomap, split)) - ioxbar.io.masters <> iomasters - ioslaves <> ioxbar.io.slaves } From 60d9291cb5d18e0ab8c424322823152ad31173d0 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Thu, 28 Jan 2016 14:50:24 -0800 Subject: [PATCH 052/116] rename external to nastiExternal to avoid name conflicts --- junctions/src/main/scala/nasti.scala | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 579ef6f4..57c12a69 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -12,12 +12,12 @@ case class NastiParameters(dataBits: Int, addrBits: Int, idBits: Int) trait HasNastiParameters { implicit val p: Parameters - val external = p(NastiKey) - val nastiXDataBits = external.dataBits + val nastiExternal = p(NastiKey) + val nastiXDataBits = nastiExternal.dataBits 
val nastiWStrobeBits = nastiXDataBits / 8 - val nastiXAddrBits = external.addrBits - val nastiWIdBits = external.idBits - val nastiRIdBits = external.idBits + val nastiXAddrBits = nastiExternal.addrBits + val nastiWIdBits = nastiExternal.idBits + val nastiRIdBits = nastiExternal.idBits val nastiXIdBits = max(nastiWIdBits, nastiRIdBits) val nastiXUserBits = 1 val nastiAWUserBits = nastiXUserBits From be424633c1d72ac0a4a8b76146f818c12b0fcf13 Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Thu, 4 Feb 2016 15:26:42 -0800 Subject: [PATCH 053/116] Improve ParamaterizedBundle.cloneType()'s error messages Without this it's really hard to read the IllegalArgumentException that you get if you subclass ParamaterizedBundle and don't define a matching cloneType(). --- junctions/src/main/scala/util.scala | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/junctions/src/main/scala/util.scala b/junctions/src/main/scala/util.scala index 865d63b5..28d5a470 100644 --- a/junctions/src/main/scala/util.scala +++ b/junctions/src/main/scala/util.scala @@ -8,7 +8,17 @@ object bigIntPow2 { } class ParameterizedBundle(implicit p: Parameters) extends Bundle { - override def cloneType = this.getClass.getConstructors.head.newInstance(p).asInstanceOf[this.type] + override def cloneType = { + try { + this.getClass.getConstructors.head.newInstance(p).asInstanceOf[this.type] + } catch { + case e: java.lang.IllegalArgumentException => + throwException("Unable to use ParamaterizedBundle.cloneType on " + + this.getClass + ", probably because " + this.getClass + + "() takes more than one argument. 
Consider overriding " + + "cloneType() on " + this.getClass, e) + } + } } class HellaFlowQueue[T <: Data](val entries: Int)(data: => T) extends Module { From 62257e0b0472e4fb2efb6b6adc28096cd6fabd18 Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Thu, 4 Feb 2016 15:28:46 -0800 Subject: [PATCH 054/116] Uncomment MemSerializedIO.cloneType() Not sure why this was commented, but when I build this against Chisel3 it fails without this override. --- junctions/src/main/scala/memserdes.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/junctions/src/main/scala/memserdes.scala b/junctions/src/main/scala/memserdes.scala index 52abf3c3..b66e5150 100644 --- a/junctions/src/main/scala/memserdes.scala +++ b/junctions/src/main/scala/memserdes.scala @@ -57,7 +57,7 @@ class MemPipeIO(implicit p: Parameters) extends ParameterizedBundle()(p) { class MemSerializedIO(w: Int)(implicit p: Parameters) extends ParameterizedBundle()(p) { val req = Decoupled(Bits(width = w)) val resp = Valid(Bits(width = w)).flip - //override def cloneType = new MemSerializedIO(w)(p).asInstanceOf[this.type] + override def cloneType = new MemSerializedIO(w)(p).asInstanceOf[this.type] } class MemSerdes(w: Int)(implicit p: Parameters) extends MIFModule From fef8a2d8628ab9aaa29cd6474ef59b56e3183102 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Mon, 15 Feb 2016 09:48:35 -0800 Subject: [PATCH 055/116] make sure NastiIOStreamIOConverter does not depend on external last signal --- junctions/src/main/scala/stream.scala | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/junctions/src/main/scala/stream.scala b/junctions/src/main/scala/stream.scala index e49746df..a0380eb5 100644 --- a/junctions/src/main/scala/stream.scala +++ b/junctions/src/main/scala/stream.scala @@ -27,14 +27,19 @@ class NastiIOStreamIOConverter(w: Int)(implicit p: Parameters) extends Module { val streamSize = UInt(log2Up(w / 8)) assert(!io.nasti.ar.valid || io.nasti.ar.bits.size === 
streamSize, "read channel wrong size on stream") - assert(!io.nasti.ar.valid || io.nasti.ar.bits.burst === BURST_FIXED, + assert(!io.nasti.ar.valid || io.nasti.ar.bits.len === UInt(0) || + io.nasti.ar.bits.burst === BURST_FIXED, "read channel wrong burst type on stream") assert(!io.nasti.aw.valid || io.nasti.aw.bits.size === streamSize, "write channel wrong size on stream") - assert(!io.nasti.aw.valid || io.nasti.aw.bits.burst === BURST_FIXED, + assert(!io.nasti.aw.valid || io.nasti.aw.bits.len === UInt(0) || + io.nasti.aw.bits.burst === BURST_FIXED, "write channel wrong burst type on stream") + assert(!io.nasti.w.valid || io.nasti.w.bits.strb.andR, + "write channel cannot take partial writes") val read_id = Reg(io.nasti.ar.bits.id) + val read_cnt = Reg(io.nasti.ar.bits.len) val reading = Reg(init = Bool(false)) io.nasti.ar.ready := !reading @@ -46,11 +51,16 @@ class NastiIOStreamIOConverter(w: Int)(implicit p: Parameters) extends Module { when (io.nasti.ar.fire()) { read_id := io.nasti.ar.bits.id + read_cnt := io.nasti.ar.bits.len reading := Bool(true) } - when (io.nasti.r.fire() && io.nasti.r.bits.last) { - reading := Bool(false) + when (io.nasti.r.fire()) { + when (read_cnt === UInt(0)) { + reading := Bool(false) + } .otherwise { + read_cnt := read_cnt - UInt(1) + } } val write_id = Reg(io.nasti.aw.bits.id) From 6b39db8ce67cf6edded4a87d29a6b669af9c7f6a Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Tue, 16 Feb 2016 23:50:23 -0800 Subject: [PATCH 056/116] Add "NastiMemorySelector", a memory interconnect On Hurricane we want to be able to support multiple memory channels but have a fallback to fewer, since the full configuration is going to require a complicated FPGA setup. This adds another sort of interconnect that can switch between having different numbers of top-level memory channels active at chip boot time. This interconnect is a bit funny: changing the select input when there is memory traffic is a bad idea. 
This is fine for this use case, since we really only care about changing the memory configuration at boot time -- since it'll scramble the memory of the machine it's not so useful, anyway. The advantage is that we don't have to have a full 8x8 Nasti crossbar in our chip, which would be fairly expensive. Changing the crossbar would garble memory as well, so it's not like it would add any extra functionality. --- junctions/src/main/scala/nasti.scala | 79 ++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 57c12a69..9567cfb4 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -596,3 +596,82 @@ class NastiMemoryInterconnect( connectChannel(io.slaves(i), channelArb.io.slave) } } + +/** Allows users to switch between various memory configurations. Note that + * this is a dangerous operation: not only does switching the select input to + * this module violate Nasti, it also causes the memory of the machine to + * become garbled. It's expected that select only changes at boot time, as + * part of the memory controller configuration. 
*/ +class NastiMemorySelector(nBanks: Int, maxMemChannels: Int, configs: Seq[Int])(implicit p: Parameters) extends NastiModule()(p) { + val nMasters = nBanks + val nSlaves = maxMemChannels + val nConfigs = configs.size + + val io = new Bundle { + val masters = Vec(nMasters, new NastiIO).flip + val slaves = Vec(nSlaves, new NastiIO) + val select = UInt(INPUT, width = log2Up(nConfigs)) + } + + def muxOnSelect(up: DecoupledIO[Bundle], dn: DecoupledIO[Bundle], active: Bool): Unit = { + when (active) { dn.bits := up.bits } + when (active) { up.ready := dn.ready } + when (active) { dn.valid := up.valid } + } + + def muxOnSelect(up: NastiIO, dn: NastiIO, active: Bool): Unit = { + muxOnSelect(up.aw, dn.aw, active) + muxOnSelect(up.w, dn.w, active) + muxOnSelect(dn.b, up.b, active) + muxOnSelect(up.ar, dn.ar, active) + muxOnSelect(dn.r, up.r, active) + } + + def muxOnSelect(up: Vec[NastiIO], dn: Vec[NastiIO], active: Bool) : Unit = { + for (i <- 0 until up.size) + muxOnSelect(up(i), dn(i), active) + } + + /* Disconnects a vector of Nasti ports, which involves setting them to + * invalid. Due to Chisel reasons, we need to also set the bits to 0 (since + * there can't be any unconnected inputs). */ + def disconnectSlave(slave: Vec[NastiIO]) = { + slave.foreach{ m => + m.aw.valid := Bool(false) + m.aw.bits := m.aw.bits.fromBits( UInt(0) ) + m.w.valid := Bool(false) + m.w.bits := m.w.bits.fromBits( UInt(0) ) + m.b.ready := Bool(false) + m.ar.valid := Bool(false) + m.ar.bits := m.ar.bits.fromBits( UInt(0) ) + m.r.ready := Bool(false) + } + } + + def disconnectMaster(master: Vec[NastiIO]) = { + master.foreach{ m => + m.aw.ready := Bool(false) + m.w.ready := Bool(false) + m.b.valid := Bool(false) + m.b.bits := m.b.bits.fromBits( UInt(0) ) + m.ar.ready := Bool(false) + m.r.valid := Bool(false) + m.r.bits := m.r.bits.fromBits( UInt(0) ) + } + } + + /* Provides default wires on all our outputs. 
*/ + disconnectMaster(io.masters) + disconnectSlave(io.slaves) + + /* Constructs interconnects for each of the layouts suggested by the + * configuration and switches between them based on the select input. */ + configs.zipWithIndex.foreach{ case (nChannels, select) => + val nBanksPerChannel = nBanks / nChannels + val ic = Module(new NastiMemoryInterconnect(nBanksPerChannel, nChannels)) + disconnectMaster(ic.io.slaves) + disconnectSlave(ic.io.masters) + muxOnSelect( io.masters, ic.io.masters, io.select === UInt(select)) + muxOnSelect(ic.io.slaves, io.slaves, io.select === UInt(select)) + } +} From 770f2742de30ee158986a1075be6369b0987e6fb Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Wed, 17 Feb 2016 10:41:01 -0800 Subject: [PATCH 057/116] Make NastiMemorySelector a subtype of NastiInterconnect When RocketChip has a single memory configuration I want to ensure no extra hardware is being generated by only instantiating a NastiMemoryInterconnect rather than a NastiMemorySelector, which I believe will insert a Mux with 0 when there is only one config (because there aren't any 0-width wires allowed). --- junctions/src/main/scala/nasti.scala | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 9567cfb4..b2e0aa59 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -602,16 +602,22 @@ class NastiMemoryInterconnect( * this module violate Nasti, it also causes the memory of the machine to * become garbled. It's expected that select only changes at boot time, as * part of the memory controller configuration. 
*/ -class NastiMemorySelector(nBanks: Int, maxMemChannels: Int, configs: Seq[Int])(implicit p: Parameters) extends NastiModule()(p) { +class NastiMemorySelectorIO(val nBanks: Int, val maxMemChannels: Int, nConfigs: Int) + (implicit p: Parameters) + extends NastiInterconnectIO(nBanks, maxMemChannels) { + val select = UInt(INPUT, width = log2Up(nConfigs)) + override def cloneType = + new NastiMemorySelectorIO(nMasters, nSlaves, nConfigs).asInstanceOf[this.type] +} + +class NastiMemorySelector(nBanks: Int, maxMemChannels: Int, configs: Seq[Int]) + (implicit p: Parameters) + extends NastiInterconnect()(p) { val nMasters = nBanks val nSlaves = maxMemChannels val nConfigs = configs.size - val io = new Bundle { - val masters = Vec(nMasters, new NastiIO).flip - val slaves = Vec(nSlaves, new NastiIO) - val select = UInt(INPUT, width = log2Up(nConfigs)) - } + override lazy val io = new NastiMemorySelectorIO(nBanks, maxMemChannels, nConfigs) def muxOnSelect(up: DecoupledIO[Bundle], dn: DecoupledIO[Bundle], active: Bool): Unit = { when (active) { dn.bits := up.bits } From 5241ee6442d1be538e17bb646dfe375dc9e60088 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Tue, 16 Feb 2016 21:45:39 -0800 Subject: [PATCH 058/116] add multi-width FIFO --- junctions/src/main/scala/util.scala | 68 +++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/junctions/src/main/scala/util.scala b/junctions/src/main/scala/util.scala index 28d5a470..f03251f1 100644 --- a/junctions/src/main/scala/util.scala +++ b/junctions/src/main/scala/util.scala @@ -219,3 +219,71 @@ class DecoupledHelper(val rvs: Seq[Bool]) { (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } } + +class MultiWidthFifo(inW: Int, outW: Int, n: Int) extends Module { + val io = new Bundle { + val in = Decoupled(Bits(width = inW)).flip + val out = Decoupled(Bits(width = outW)) + } + + if (inW == outW) { + io.out <> Queue(io.in, n) + } else if (inW > outW) { + val nBeats = inW / outW + + require(inW % outW == 0, 
s"MultiWidthFifo: in: $inW not divisible by out: $outW") + require(n % nBeats == 0, s"Cannot store $n output words when output beats is $nBeats") + + val wdata = Reg(Vec(n / nBeats, Bits(width = inW))) + val rdata = Vec(wdata.flatMap { indat => + (0 until nBeats).map(i => indat(outW * (i + 1) - 1, outW * i)) }) + + val head = Reg(init = UInt(0, log2Up(n / nBeats))) + val tail = Reg(init = UInt(0, log2Up(n))) + val size = Reg(init = UInt(0, log2Up(n + 1))) + + when (io.in.fire()) { + wdata(head) := io.in.bits + head := head + UInt(1) + } + + when (io.out.fire()) { tail := tail + UInt(1) } + + size := MuxCase(size, Seq( + (io.in.fire() && io.out.fire()) -> (size + UInt(nBeats - 1)), + io.in.fire() -> (size + UInt(nBeats)), + io.out.fire() -> (size - UInt(1)))) + + io.out.valid := size > UInt(0) + io.out.bits := rdata(tail) + io.in.ready := size < UInt(n) + } else { + val nBeats = outW / inW + + require(outW % inW == 0, s"MultiWidthFifo: out: $outW not divisible by in: $inW") + + val wdata = Reg(Vec(n * nBeats, Bits(width = inW))) + val rdata = Vec.tabulate(n) { i => + Cat(wdata.slice(i * nBeats, (i + 1) * nBeats).reverse)} + + val head = Reg(init = UInt(0, log2Up(n * nBeats))) + val tail = Reg(init = UInt(0, log2Up(n))) + val size = Reg(init = UInt(0, log2Up(n * nBeats + 1))) + + when (io.in.fire()) { + wdata(head) := io.in.bits + head := head + UInt(1) + } + + when (io.out.fire()) { tail := tail + UInt(1) } + + size := MuxCase(size, Seq( + (io.in.fire() && io.out.fire()) -> (size - UInt(nBeats - 1)), + io.in.fire() -> (size + UInt(1)), + io.out.fire() -> (size - UInt(nBeats)))) + + io.out.valid := size >= UInt(nBeats) + io.out.bits := rdata(tail) + io.in.ready := size < UInt(n * nBeats) + } +} From fbd66ac87b29a70e02d4ab3a5a381d884fc22edb Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Wed, 17 Feb 2016 13:35:03 -0800 Subject: [PATCH 059/116] expose a count in MultiWidthFifo --- junctions/src/main/scala/util.scala | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 
deletions(-) diff --git a/junctions/src/main/scala/util.scala b/junctions/src/main/scala/util.scala index f03251f1..9455d9b2 100644 --- a/junctions/src/main/scala/util.scala +++ b/junctions/src/main/scala/util.scala @@ -224,10 +224,14 @@ class MultiWidthFifo(inW: Int, outW: Int, n: Int) extends Module { val io = new Bundle { val in = Decoupled(Bits(width = inW)).flip val out = Decoupled(Bits(width = outW)) + val count = UInt(OUTPUT, log2Up(n + 1)) } if (inW == outW) { - io.out <> Queue(io.in, n) + val q = Module(new Queue(Bits(width = inW), n)) + q.io.enq <> io.in + io.out <> q.io.deq + io.count := q.io.count } else if (inW > outW) { val nBeats = inW / outW @@ -257,6 +261,7 @@ class MultiWidthFifo(inW: Int, outW: Int, n: Int) extends Module { io.out.valid := size > UInt(0) io.out.bits := rdata(tail) io.in.ready := size < UInt(n) + io.count := size } else { val nBeats = outW / inW @@ -282,7 +287,8 @@ class MultiWidthFifo(inW: Int, outW: Int, n: Int) extends Module { io.in.fire() -> (size + UInt(1)), io.out.fire() -> (size - UInt(nBeats)))) - io.out.valid := size >= UInt(nBeats) + io.count := size >> UInt(log2Up(nBeats)) + io.out.valid := io.count > UInt(0) io.out.bits := rdata(tail) io.in.ready := size < UInt(n * nBeats) } From db3b2c264c19b4f254117c53426f07e280c01de5 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Thu, 21 Jan 2016 17:39:57 -0800 Subject: [PATCH 060/116] Add constructors, converters, and serdes for AXI tunneled over SERDES (AtoS) --- junctions/src/main/scala/atos.scala | 286 ++++++++++++++++++++++++++ junctions/src/main/scala/stream.scala | 93 +++++++++ 2 files changed, 379 insertions(+) create mode 100644 junctions/src/main/scala/atos.scala diff --git a/junctions/src/main/scala/atos.scala b/junctions/src/main/scala/atos.scala new file mode 100644 index 00000000..ed2160a8 --- /dev/null +++ b/junctions/src/main/scala/atos.scala @@ -0,0 +1,286 @@ +package junctions + +import Chisel._ +import scala.math.max +import cde.{Parameters, Field} + +trait 
HasAtosParameters extends HasNastiParameters { + // round up to a multiple of 32 + def roundup(n: Int) = 32 * ((n - 1) / 32 + 1) + + val atosUnionBits = max(nastiXDataBits + nastiWStrobeBits + 1, + nastiXIdBits + nastiXBurstBits + + nastiXSizeBits + nastiXLenBits + nastiXAddrBits) + val atosIdBits = nastiXIdBits + val atosTypBits = 2 + val atosRespBits = nastiXRespBits + val atosDataBits = nastiXDataBits + + val atosLenOffset = nastiXAddrBits + val atosSizeOffset = atosLenOffset + nastiXLenBits + val atosBurstOffset = atosSizeOffset + nastiXSizeBits + val atosIdOffset = atosBurstOffset + nastiXBurstBits + val atosStrobeOffset = nastiXDataBits + val atosLastOffset = atosStrobeOffset + nastiWStrobeBits + + val atosRequestBits = roundup(atosTypBits + atosUnionBits) + val atosResponseBits = roundup(atosTypBits + atosIdBits + atosRespBits + atosDataBits + 1) + val atosRequestBytes = atosRequestBits / 8 + val atosResponseBytes = atosResponseBits / 8 + val atosRequestWords = atosRequestBytes / 4 + val atosResponseWords = atosResponseBytes / 4 +} + +abstract class AtosModule(implicit val p: Parameters) + extends Module with HasAtosParameters +abstract class AtosBundle(implicit val p: Parameters) + extends ParameterizedBundle()(p) with HasAtosParameters + +object AtosRequest { + def arType = UInt("b00") + def awType = UInt("b01") + def wType = UInt("b10") + + def apply(typ: UInt, union: UInt)(implicit p: Parameters): AtosRequest = { + val areq = Wire(new AtosRequest) + areq.typ := typ + areq.union := union + areq + } + + def apply(ar: NastiReadAddressChannel)(implicit p: Parameters): AtosRequest = + apply(arType, Cat(ar.id, ar.burst, ar.size, ar.len, ar.addr)) + + def apply(aw: NastiWriteAddressChannel)(implicit p: Parameters): AtosRequest = + apply(awType, Cat(aw.id, aw.burst, aw.size, aw.len, aw.addr)) + + def apply(w: NastiWriteDataChannel)(implicit p: Parameters): AtosRequest = + apply(wType, Cat(w.last, w.strb, w.data)) +} + +class AtosRequest(implicit p: Parameters) + 
extends AtosBundle()(p) with Serializable { + val typ = UInt(width = atosTypBits) + val union = UInt(width = atosUnionBits) + + def id(dummy: Int = 0) = + union(atosIdOffset + nastiXIdBits - 1, atosIdOffset) + + def burst(dummy: Int = 0) = + union(atosIdOffset - 1, atosBurstOffset) + + def size(dummy: Int = 0) = + union(atosBurstOffset - 1, atosSizeOffset) + + def len(dummy: Int = 0) = + union(atosSizeOffset - 1, atosLenOffset) + + def addr(dummy: Int = 0) = + union(atosLenOffset - 1, 0) + + def data(dummy: Int = 0) = + union(nastiXDataBits - 1, 0) + + def strb(dummy: Int = 0) = + union(atosLastOffset - 1, atosStrobeOffset) + + def last(dummy: Int = 0) = + union(atosLastOffset) + + def has_addr(dummy: Int = 0) = + typ === AtosRequest.arType || typ === AtosRequest.awType + + def has_data(dummy: Int = 0) = + typ === AtosRequest.wType + + def is_last(dummy: Int = 0) = + typ === AtosRequest.arType || (typ === AtosRequest.wType && last()) + + def nbytes: Int = atosRequestBytes +} + +object AtosResponse { + def rType = UInt("b00") + def bType = UInt("b01") + + def apply(typ: UInt, id: UInt, resp: UInt, data: UInt, last: Bool) + (implicit p: Parameters): AtosResponse = { + val aresp = Wire(new AtosResponse) + aresp.typ := typ + aresp.id := id + aresp.resp := resp + aresp.data := data + aresp.last := last + aresp + } + + def apply(r: NastiReadDataChannel)(implicit p: Parameters): AtosResponse = + apply(rType, r.id, r.resp, r.data, r.last) + + def apply(b: NastiWriteResponseChannel)(implicit p: Parameters): AtosResponse = + apply(bType, b.id, b.resp, UInt(0), Bool(false)) +} + +class AtosResponse(implicit p: Parameters) + extends AtosBundle()(p) with Serializable { + val typ = UInt(width = atosTypBits) + val id = UInt(width = atosIdBits) + val resp = UInt(width = atosRespBits) + val last = Bool() + val data = UInt(width = atosDataBits) + + def has_data(dummy: Int = 0) = typ === AtosResponse.rType + + def is_last(dummy: Int = 0) = !has_data() || last + + def nbytes: Int = 
atosResponseBytes +} + +class AtosIO(implicit p: Parameters) extends AtosBundle()(p) { + val req = Decoupled(new AtosRequest) + val resp = Decoupled(new AtosResponse).flip +} + +class AtosRequestEncoder(implicit p: Parameters) extends AtosModule()(p) { + val io = new Bundle { + val ar = Decoupled(new NastiReadAddressChannel).flip + val aw = Decoupled(new NastiWriteAddressChannel).flip + val w = Decoupled(new NastiWriteDataChannel).flip + val req = Decoupled(new AtosRequest) + } + + val writing = Reg(init = Bool(false)) + + io.ar.ready := !writing && io.req.ready + io.aw.ready := !writing && !io.ar.valid && io.req.ready + io.w.ready := writing && io.req.ready + + io.req.valid := Mux(writing, io.w.valid, io.ar.valid || io.aw.valid) + io.req.bits := Mux(writing, AtosRequest(io.w.bits), + Mux(io.ar.valid, AtosRequest(io.ar.bits), AtosRequest(io.aw.bits))) + + when (io.aw.fire()) { writing := Bool(true) } + when (io.w.fire() && io.w.bits.last) { writing := Bool(false) } +} + +class AtosResponseDecoder(implicit p: Parameters) extends AtosModule()(p) { + val io = new Bundle { + val resp = Decoupled(new AtosResponse).flip + val b = Decoupled(new NastiWriteResponseChannel) + val r = Decoupled(new NastiReadDataChannel) + } + + val is_b = io.resp.bits.typ === AtosResponse.bType + val is_r = io.resp.bits.typ === AtosResponse.rType + + io.b.valid := io.resp.valid && is_b + io.b.bits := NastiWriteResponseChannel( + id = io.resp.bits.id, + resp = io.resp.bits.resp) + + io.r.valid := io.resp.valid && is_r + io.r.bits := NastiReadDataChannel( + id = io.resp.bits.id, + data = io.resp.bits.data, + last = io.resp.bits.last, + resp = io.resp.bits.resp) + + io.resp.ready := (is_b && io.b.ready) || (is_r && io.r.ready) +} + +class AtosClientConverter(implicit p: Parameters) extends AtosModule()(p) { + val io = new Bundle { + val nasti = (new NastiIO).flip + val atos = new AtosIO + } + + val req_enc = Module(new AtosRequestEncoder) + req_enc.io.ar <> io.nasti.ar + req_enc.io.aw <> 
io.nasti.aw + req_enc.io.w <> io.nasti.w + io.atos.req <> req_enc.io.req + + val resp_dec = Module(new AtosResponseDecoder) + resp_dec.io.resp <> io.atos.resp + io.nasti.b <> resp_dec.io.b + io.nasti.r <> resp_dec.io.r +} + +class AtosRequestDecoder(implicit p: Parameters) extends AtosModule()(p) { + val io = new Bundle { + val req = Decoupled(new AtosRequest).flip + val ar = Decoupled(new NastiReadAddressChannel) + val aw = Decoupled(new NastiWriteAddressChannel) + val w = Decoupled(new NastiWriteDataChannel) + } + + val is_ar = io.req.bits.typ === AtosRequest.arType + val is_aw = io.req.bits.typ === AtosRequest.awType + val is_w = io.req.bits.typ === AtosRequest.wType + + io.ar.valid := io.req.valid && is_ar + io.ar.bits := NastiReadAddressChannel( + id = io.req.bits.id(), + addr = io.req.bits.addr(), + size = io.req.bits.size(), + len = io.req.bits.len(), + burst = io.req.bits.burst()) + + io.aw.valid := io.req.valid && is_aw + io.aw.bits := NastiWriteAddressChannel( + id = io.req.bits.id(), + addr = io.req.bits.addr(), + size = io.req.bits.size(), + len = io.req.bits.len(), + burst = io.req.bits.burst()) + + io.w.valid := io.req.valid && is_w + io.w.bits := NastiWriteDataChannel( + data = io.req.bits.data(), + strb = io.req.bits.strb(), + last = io.req.bits.last()) + + io.req.ready := (io.ar.ready && is_ar) || + (io.aw.ready && is_aw) || + (io.w.ready && is_w) +} + +class AtosResponseEncoder(implicit p: Parameters) extends AtosModule()(p) { + val io = new Bundle { + val b = Decoupled(new NastiWriteResponseChannel).flip + val r = Decoupled(new NastiReadDataChannel).flip + val resp = Decoupled(new AtosResponse) + } + + val locked = Reg(init = Bool(false)) + + io.resp.valid := (io.b.valid && !locked) || io.r.valid + io.resp.bits := Mux(io.r.valid, + AtosResponse(io.r.bits), AtosResponse(io.b.bits)) + + io.b.ready := !locked && !io.r.valid && io.resp.ready + io.r.ready := io.resp.ready + + when (io.r.fire() && !io.r.bits.last) { locked := Bool(true) } + when 
(io.r.fire() && io.r.bits.last) { locked := Bool(false) } +} + +class AtosManagerConverter(implicit p: Parameters) extends AtosModule()(p) { + val io = new Bundle { + val atos = (new AtosIO).flip + val nasti = new NastiIO + } + + val req_dec = Module(new AtosRequestDecoder) + val resp_enc = Module(new AtosResponseEncoder) + + req_dec.io.req <> io.atos.req + io.atos.resp <> resp_enc.io.resp + + io.nasti.ar <> req_dec.io.ar + io.nasti.aw <> req_dec.io.aw + io.nasti.w <> req_dec.io.w + + resp_enc.io.b <> io.nasti.b + resp_enc.io.r <> io.nasti.r +} diff --git a/junctions/src/main/scala/stream.scala b/junctions/src/main/scala/stream.scala index a0380eb5..2b577272 100644 --- a/junctions/src/main/scala/stream.scala +++ b/junctions/src/main/scala/stream.scala @@ -87,3 +87,96 @@ class NastiIOStreamIOConverter(w: Int)(implicit p: Parameters) extends Module { when (io.nasti.b.fire()) { write_resp := Bool(false) } } + +class StreamNarrower(win: Int, wout: Int) extends Module { + require(win > wout, "Stream narrower input width must be larger than input width") + require(win % wout == 0, "Stream narrower input width must be multiple of output width") + + val io = new Bundle { + val in = Decoupled(new StreamChannel(win)).flip + val out = Decoupled(new StreamChannel(wout)) + } + + val n_pieces = win / wout + val buffer = Reg(Bits(width = win)) + val (piece_idx, pkt_done) = Counter(io.out.fire(), n_pieces) + val pieces = Vec.tabulate(n_pieces) { i => buffer(wout * (i + 1) - 1, wout * i) } + val last_piece = (piece_idx === UInt(n_pieces - 1)) + val sending = Reg(init = Bool(false)) + val in_last = Reg(Bool()) + + when (io.in.fire()) { + buffer := io.in.bits.data + in_last := io.in.bits.last + sending := Bool(true) + } + when (pkt_done) { sending := Bool(false) } + + io.out.valid := sending + io.out.bits.data := pieces(piece_idx) + io.out.bits.last := in_last && last_piece + io.in.ready := !sending +} + +class StreamExpander(win: Int, wout: Int) extends Module { + require(win < 
wout, "Stream expander input width must be smaller than input width") + require(wout % win == 0, "Stream narrower output width must be multiple of input width") + + val io = new Bundle { + val in = Decoupled(new StreamChannel(win)).flip + val out = Decoupled(new StreamChannel(wout)) + } + + val n_pieces = wout / win + val buffer = Reg(Vec(n_pieces, UInt(width = win))) + val last = Reg(Bool()) + val collecting = Reg(init = Bool(true)) + val (piece_idx, pkt_done) = Counter(io.in.fire(), n_pieces) + + when (io.in.fire()) { buffer(piece_idx) := io.in.bits.data } + when (pkt_done) { last := io.in.bits.last; collecting := Bool(false) } + when (io.out.fire()) { collecting := Bool(true) } + + io.in.ready := collecting + io.out.valid := !collecting + io.out.bits.data := buffer.toBits + io.out.bits.last := last +} + +object StreamUtils { + def connectStreams(a: StreamIO, b: StreamIO) { + a.in <> b.out + b.in <> a.out + } +} + +trait Serializable { + def nbytes: Int +} + +class Serializer[T <: Data with Serializable](typ: T) extends Module { + val io = new Bundle { + val in = Decoupled(typ).flip + val out = Decoupled(new StreamChannel(8)) + } + + val narrower = Module(new StreamNarrower(typ.nbytes * 8, 8)) + narrower.io.in.bits.data := io.in.bits.toBits + narrower.io.in.bits.last := Bool(true) + narrower.io.in.valid := io.in.valid + io.in.ready := narrower.io.in.ready + io.out <> narrower.io.out +} + +class Deserializer[T <: Data with Serializable](typ: T) extends Module { + val io = new Bundle { + val in = Decoupled(new StreamChannel(8)).flip + val out = Decoupled(typ) + } + + val expander = Module(new StreamExpander(8, 8 * typ.nbytes)) + expander.io.in <> io.in + io.out.valid := expander.io.out.valid + io.out.bits := typ.cloneType.fromBits(expander.io.out.bits.data) + expander.io.out.ready := io.out.ready +} From 4f5b1da58b34c28a4953b47d103808a1aa849142 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Tue, 16 Feb 2016 10:02:48 -0800 Subject: [PATCH 061/116] add a resp_len 
helper to AtosRequest --- junctions/src/main/scala/atos.scala | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/junctions/src/main/scala/atos.scala b/junctions/src/main/scala/atos.scala index ed2160a8..f11c7877 100644 --- a/junctions/src/main/scala/atos.scala +++ b/junctions/src/main/scala/atos.scala @@ -97,6 +97,11 @@ class AtosRequest(implicit p: Parameters) typ === AtosRequest.arType || (typ === AtosRequest.wType && last()) def nbytes: Int = atosRequestBytes + + def resp_len(dummy: Int = 0) = + MuxLookup(typ, UInt(0), Seq( + AtosRequest.arType -> (len() + UInt(1)), + AtosRequest.awType -> UInt(1))) } object AtosResponse { From db09f310a15c9de771f13679592c35d517712994 Mon Sep 17 00:00:00 2001 From: Eric Love Date: Fri, 11 Mar 2016 16:48:13 -0800 Subject: [PATCH 062/116] Define MIFMasterTagBits as # bits a master can *use* in tag --- junctions/src/main/scala/memserdes.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/junctions/src/main/scala/memserdes.scala b/junctions/src/main/scala/memserdes.scala index b66e5150..551b20b1 100644 --- a/junctions/src/main/scala/memserdes.scala +++ b/junctions/src/main/scala/memserdes.scala @@ -8,6 +8,7 @@ import cde.{Parameters, Field} case object MIFAddrBits extends Field[Int] case object MIFDataBits extends Field[Int] case object MIFTagBits extends Field[Int] +case object MIFMasterTagBits extends Field[Int] case object MIFDataBeats extends Field[Int] trait HasMIFParameters { From 34852e406dec92007be62fc877a8f4026ac207db Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Mon, 28 Mar 2016 12:22:43 -0700 Subject: [PATCH 063/116] fix bug in NastiRouter --- junctions/src/main/scala/nasti.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index b2e0aa59..75cfb0ec 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -408,8 +408,8 @@ class NastiRouter(nSlaves: Int, routeSel: UInt => 
UInt)(implicit p: Parameters) aw_ready = aw_ready || (s.aw.ready && aw_route(i)) val chosen = Reg(init = Bool(false)) - when (s.aw.fire()) { chosen := Bool(true) } when (s.w.fire() && s.w.bits.last) { chosen := Bool(false) } + when (s.aw.fire()) { chosen := Bool(true) } s.w.valid := io.master.w.valid && chosen s.w.bits := io.master.w.bits From 015992bc9effa77ec1aa2325c855ec419312ccef Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Mon, 28 Mar 2016 12:24:11 -0700 Subject: [PATCH 064/116] no longer need MIFMasterTagBits --- junctions/src/main/scala/memserdes.scala | 1 - 1 file changed, 1 deletion(-) diff --git a/junctions/src/main/scala/memserdes.scala b/junctions/src/main/scala/memserdes.scala index 551b20b1..b66e5150 100644 --- a/junctions/src/main/scala/memserdes.scala +++ b/junctions/src/main/scala/memserdes.scala @@ -8,7 +8,6 @@ import cde.{Parameters, Field} case object MIFAddrBits extends Field[Int] case object MIFDataBits extends Field[Int] case object MIFTagBits extends Field[Int] -case object MIFMasterTagBits extends Field[Int] case object MIFDataBeats extends Field[Int] trait HasMIFParameters { From d66d8f0cd4dd42bbb4ad9bf1b825caff0a4de854 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Fri, 1 Apr 2016 17:41:40 -0700 Subject: [PATCH 065/116] fix SMI converter --- junctions/src/main/scala/smi.scala | 35 +++++++++++++++++------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/junctions/src/main/scala/smi.scala b/junctions/src/main/scala/smi.scala index b829abb3..3832788e 100644 --- a/junctions/src/main/scala/smi.scala +++ b/junctions/src/main/scala/smi.scala @@ -113,7 +113,6 @@ class SmiIONastiReadIOConverter(val dataWidth: Int, val addrWidth: Int) val id = Reg(UInt(width = nastiRIdBits)) val byteOff = Reg(UInt(width = byteOffBits)) - val sendInd = Reg(init = UInt(0, wordCountBits)) val recvInd = Reg(init = UInt(0, wordCountBits)) val sendDone = Reg(init = Bool(false)) @@ -136,32 +135,30 @@ class SmiIONastiReadIOConverter(val dataWidth: 
Int, val addrWidth: Int) when (io.ar.fire()) { when (io.ar.bits.size < UInt(byteOffBits)) { nWords := UInt(0) - byteOff := io.ar.bits.addr(byteOffBits - 1, 0) } .otherwise { nWords := calcWordCount(io.ar.bits.size) - byteOff := UInt(0) } nBeats := io.ar.bits.len addr := io.ar.bits.addr(addrOffBits - 1, byteOffBits) + recvInd := io.ar.bits.addr(wordCountBits + byteOffBits - 1, byteOffBits) id := io.ar.bits.id state := s_read } when (io.smi.req.fire()) { addr := addr + UInt(1) - sendInd := sendInd + UInt(1) - sendDone := (sendInd === nWords) + sendDone := (nWords === UInt(0)) } when (io.smi.resp.fire()) { recvInd := recvInd + UInt(1) - buffer(recvInd) := io.smi.resp.bits >> Cat(byteOff, UInt(0, 3)) - when (recvInd === nWords) { state := s_resp } + nWords := nWords - UInt(1) + buffer(recvInd) := io.smi.resp.bits + when (nWords === UInt(0)) { state := s_resp } } when (io.r.fire()) { recvInd := UInt(0) - sendInd := UInt(0) sendDone := Bool(false) // clear all the registers in the buffer buffer.foreach(_ := Bits(0)) @@ -183,19 +180,19 @@ class SmiIONastiWriteIOConverter(val dataWidth: Int, val addrWidth: Int) private val maxWordsPerBeat = nastiXDataBits / dataWidth private val byteOffBits = log2Floor(dataBytes) private val addrOffBits = addrWidth + byteOffBits + private val nastiByteOffBits = log2Ceil(nastiXDataBits / 8) assert(!io.aw.valid || io.aw.bits.size >= UInt(byteOffBits), "Nasti size must be >= Smi size") val id = Reg(UInt(width = nastiWIdBits)) val addr = Reg(UInt(width = addrWidth)) + val offset = Reg(UInt(width = nastiByteOffBits)) - def makeStrobe(size: UInt, strb: UInt) = { + def makeStrobe(offset: UInt, size: UInt, strb: UInt) = { val sizemask = (UInt(1) << (UInt(1) << size)) - UInt(1) - val bytemask = sizemask & strb + val bytemask = strb & (sizemask << offset) Vec.tabulate(maxWordsPerBeat){i => bytemask(dataBytes * i)}.toBits - //val strbmask = Vec.tabulate(maxWordsPerBeat){i => strb(dataBytes * i)}.toBits - //sizemask & strbmask } val size = 
Reg(UInt(width = nastiXSizeBits)) @@ -222,7 +219,13 @@ class SmiIONastiWriteIOConverter(val dataWidth: Int, val addrWidth: Int) else UInt(1) when (io.aw.fire()) { - addr := io.aw.bits.addr(addrOffBits - 1, byteOffBits) + if (dataWidth == nastiXDataBits) { + addr := io.aw.bits.addr(addrOffBits - 1, byteOffBits) + } else { + addr := Cat(io.aw.bits.addr(addrOffBits - 1, nastiByteOffBits), + UInt(0, nastiByteOffBits - byteOffBits)) + } + offset := io.aw.bits.addr(nastiByteOffBits - 1, 0) id := io.aw.bits.id size := io.aw.bits.size last := Bool(false) @@ -231,7 +234,7 @@ class SmiIONastiWriteIOConverter(val dataWidth: Int, val addrWidth: Int) when (io.w.fire()) { last := io.w.bits.last - strb := makeStrobe(size, io.w.bits.strb) + strb := makeStrobe(offset, size, io.w.bits.strb) data := io.w.bits.data state := s_send } @@ -259,7 +262,9 @@ class SmiIONastiIOConverter(val dataWidth: Int, val addrWidth: Int) val smi = new SmiIO(dataWidth, addrWidth) } - require(isPow2(dataWidth), "Smi data width must be power of 2") + require(isPow2(dataWidth), "SMI data width must be power of 2") + require(dataWidth <= nastiXDataBits, + "SMI data width must be less than or equal to NASTI data width") val reader = Module(new SmiIONastiReadIOConverter(dataWidth, addrWidth)) reader.io.ar <> io.nasti.ar From 1dc8af894ed9e1e5b8a292af9a90054ce648a51c Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Wed, 24 Feb 2016 21:33:51 -0800 Subject: [PATCH 066/116] fix serializer/deserializer and add Atos serdes/desser --- junctions/src/main/scala/atos.scala | 40 +++++++++++++++++++++++++-- junctions/src/main/scala/stream.scala | 23 +++++++++------ 2 files changed, 52 insertions(+), 11 deletions(-) diff --git a/junctions/src/main/scala/atos.scala b/junctions/src/main/scala/atos.scala index f11c7877..d526a773 100644 --- a/junctions/src/main/scala/atos.scala +++ b/junctions/src/main/scala/atos.scala @@ -96,7 +96,7 @@ class AtosRequest(implicit p: Parameters) def is_last(dummy: Int = 0) = typ === 
AtosRequest.arType || (typ === AtosRequest.wType && last()) - def nbytes: Int = atosRequestBytes + def nbits: Int = atosRequestBits def resp_len(dummy: Int = 0) = MuxLookup(typ, UInt(0), Seq( @@ -138,7 +138,7 @@ class AtosResponse(implicit p: Parameters) def is_last(dummy: Int = 0) = !has_data() || last - def nbytes: Int = atosResponseBytes + def nbits: Int = atosResponseBits } class AtosIO(implicit p: Parameters) extends AtosBundle()(p) { @@ -289,3 +289,39 @@ class AtosManagerConverter(implicit p: Parameters) extends AtosModule()(p) { resp_enc.io.b <> io.nasti.b resp_enc.io.r <> io.nasti.r } + +class AtosSerializedIO(w: Int)(implicit p: Parameters) extends ParameterizedBundle()(p) { + val req = Decoupled(Bits(width = w)) + val resp = Decoupled(Bits(width = w)).flip + override def cloneType = new AtosSerializedIO(w)(p).asInstanceOf[this.type] +} + +class AtosSerdes(w: Int)(implicit p: Parameters) extends AtosModule()(p) { + val io = new Bundle { + val wide = (new AtosIO).flip + val narrow = new AtosSerializedIO(w) + } + + val ser = Module(new Serializer(w, new AtosRequest)) + ser.io.in <> io.wide.req + io.narrow.req <> ser.io.out + + val des = Module(new Deserializer(w, new AtosResponse)) + des.io.in <> io.narrow.resp + io.wide.resp <> des.io.out +} + +class AtosDesser(w: Int)(implicit p: Parameters) extends AtosModule()(p) { + val io = new Bundle { + val narrow = new AtosSerializedIO(w).flip + val wide = new AtosIO + } + + val des = Module(new Deserializer(w, new AtosRequest)) + des.io.in <> io.narrow.req + io.wide.req <> des.io.out + + val ser = Module(new Serializer(w, new AtosResponse)) + ser.io.in <> io.wide.resp + io.narrow.resp <> ser.io.out +} diff --git a/junctions/src/main/scala/stream.scala b/junctions/src/main/scala/stream.scala index 2b577272..5ee14c5e 100644 --- a/junctions/src/main/scala/stream.scala +++ b/junctions/src/main/scala/stream.scala @@ -151,31 +151,36 @@ object StreamUtils { } trait Serializable { - def nbytes: Int + def nbits: Int } 
-class Serializer[T <: Data with Serializable](typ: T) extends Module { +class Serializer[T <: Data with Serializable](w: Int, typ: T) extends Module { val io = new Bundle { val in = Decoupled(typ).flip - val out = Decoupled(new StreamChannel(8)) + val out = Decoupled(Bits(width = w)) } - val narrower = Module(new StreamNarrower(typ.nbytes * 8, 8)) + val narrower = Module(new StreamNarrower(typ.nbits, w)) narrower.io.in.bits.data := io.in.bits.toBits narrower.io.in.bits.last := Bool(true) narrower.io.in.valid := io.in.valid io.in.ready := narrower.io.in.ready - io.out <> narrower.io.out + io.out.valid := narrower.io.out.valid + io.out.bits := narrower.io.out.bits.data + narrower.io.out.ready := io.out.ready } -class Deserializer[T <: Data with Serializable](typ: T) extends Module { +class Deserializer[T <: Data with Serializable](w: Int, typ: T) extends Module { val io = new Bundle { - val in = Decoupled(new StreamChannel(8)).flip + val in = Decoupled(Bits(width = w)).flip val out = Decoupled(typ) } - val expander = Module(new StreamExpander(8, 8 * typ.nbytes)) - expander.io.in <> io.in + val expander = Module(new StreamExpander(w, typ.nbits)) + expander.io.in.valid := io.in.valid + expander.io.in.bits.data := io.in.bits + expander.io.in.bits.last := Bool(true) + io.in.ready := expander.io.in.ready io.out.valid := expander.io.out.valid io.out.bits := typ.cloneType.fromBits(expander.io.out.bits.data) expander.io.out.ready := io.out.ready From 0bf8d07abaec068e3ca7ab075ab4d537c9d8ae3a Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Wed, 24 Feb 2016 23:10:06 -0800 Subject: [PATCH 067/116] make AtosSerializedIO clock divisible --- junctions/src/main/scala/atos.scala | 2 ++ 1 file changed, 2 insertions(+) diff --git a/junctions/src/main/scala/atos.scala b/junctions/src/main/scala/atos.scala index d526a773..d092b14e 100644 --- a/junctions/src/main/scala/atos.scala +++ b/junctions/src/main/scala/atos.scala @@ -293,6 +293,8 @@ class AtosManagerConverter(implicit p: 
Parameters) extends AtosModule()(p) { class AtosSerializedIO(w: Int)(implicit p: Parameters) extends ParameterizedBundle()(p) { val req = Decoupled(Bits(width = w)) val resp = Decoupled(Bits(width = w)).flip + val clk = Bool(OUTPUT) + val clk_edge = Bool(OUTPUT) override def cloneType = new AtosSerializedIO(w)(p).asInstanceOf[this.type] } From 42c4d1e51f2bf298921046c025e568dc4bbad8fd Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Wed, 24 Feb 2016 23:10:22 -0800 Subject: [PATCH 068/116] add NastiMemoryDemux --- junctions/src/main/scala/nasti.scala | 36 ++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 75cfb0ec..a3ed9875 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -681,3 +681,39 @@ class NastiMemorySelector(nBanks: Int, maxMemChannels: Int, configs: Seq[Int]) muxOnSelect(ic.io.slaves, io.slaves, io.select === UInt(select)) } } + +class NastiMemoryDemux(nRoutes: Int)(implicit p: Parameters) extends NastiModule()(p) { + val io = new Bundle { + val master = (new NastiIO).flip + val slaves = Vec(nRoutes, new NastiIO) + val select = UInt(INPUT, log2Up(nRoutes)) + } + + def connectReqChannel[T <: Data](idx: Int, out: DecoupledIO[T], in: DecoupledIO[T]) { + out.valid := in.valid && io.select === UInt(idx) + out.bits := in.bits + when (io.select === UInt(idx)) { in.ready := out.ready } + } + + def connectRespChannel[T <: Data](idx: Int, out: DecoupledIO[T], in: DecoupledIO[T]) { + when (io.select === UInt(idx)) { out.valid := in.valid } + when (io.select === UInt(idx)) { out.bits := in.bits } + in.ready := out.ready && io.select === UInt(idx) + } + + io.master.ar.ready := Bool(false) + io.master.aw.ready := Bool(false) + io.master.w.ready := Bool(false) + io.master.r.valid := Bool(false) + io.master.r.bits := NastiReadDataChannel(id = UInt(0), data = UInt(0)) + io.master.b.valid := Bool(false) + io.master.b.bits := 
NastiWriteResponseChannel(id = UInt(0)) + + io.slaves.zipWithIndex.foreach { case (slave, i) => + connectReqChannel(i, slave.ar, io.master.ar) + connectReqChannel(i, slave.aw, io.master.aw) + connectReqChannel(i, slave.w, io.master.w) + connectRespChannel(i, io.master.r, slave.r) + connectRespChannel(i, io.master.b, slave.b) + } +} From 1967186a96bca1553788bc6d8db1928e1ab1e2d5 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Tue, 19 Apr 2016 09:38:04 -0700 Subject: [PATCH 069/116] add id field to NastiWriteDataChannel --- junctions/src/main/scala/atos.scala | 30 ++++++++++++++++------------ junctions/src/main/scala/nasti.scala | 8 +++++--- 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/junctions/src/main/scala/atos.scala b/junctions/src/main/scala/atos.scala index d092b14e..ee78c853 100644 --- a/junctions/src/main/scala/atos.scala +++ b/junctions/src/main/scala/atos.scala @@ -8,7 +8,8 @@ trait HasAtosParameters extends HasNastiParameters { // round up to a multiple of 32 def roundup(n: Int) = 32 * ((n - 1) / 32 + 1) - val atosUnionBits = max(nastiXDataBits + nastiWStrobeBits + 1, + val atosUnionBits = max( + nastiXIdBits + nastiXDataBits + nastiWStrobeBits + 1, nastiXIdBits + nastiXBurstBits + nastiXSizeBits + nastiXLenBits + nastiXAddrBits) val atosIdBits = nastiXIdBits @@ -16,11 +17,13 @@ trait HasAtosParameters extends HasNastiParameters { val atosRespBits = nastiXRespBits val atosDataBits = nastiXDataBits - val atosLenOffset = nastiXAddrBits + val atosAddrOffset = atosIdBits + val atosLenOffset = atosIdBits + nastiXAddrBits val atosSizeOffset = atosLenOffset + nastiXLenBits val atosBurstOffset = atosSizeOffset + nastiXSizeBits - val atosIdOffset = atosBurstOffset + nastiXBurstBits - val atosStrobeOffset = nastiXDataBits + + val atosDataOffset = atosIdBits + val atosStrobeOffset = nastiXDataBits + atosIdBits val atosLastOffset = atosStrobeOffset + nastiWStrobeBits val atosRequestBits = roundup(atosTypBits + atosUnionBits) @@ -49,13 +52,13 @@ 
object AtosRequest { } def apply(ar: NastiReadAddressChannel)(implicit p: Parameters): AtosRequest = - apply(arType, Cat(ar.id, ar.burst, ar.size, ar.len, ar.addr)) + apply(arType, Cat(ar.burst, ar.size, ar.len, ar.addr, ar.id)) def apply(aw: NastiWriteAddressChannel)(implicit p: Parameters): AtosRequest = - apply(awType, Cat(aw.id, aw.burst, aw.size, aw.len, aw.addr)) + apply(awType, Cat(aw.burst, aw.size, aw.len, aw.addr, aw.id)) def apply(w: NastiWriteDataChannel)(implicit p: Parameters): AtosRequest = - apply(wType, Cat(w.last, w.strb, w.data)) + apply(wType, Cat(w.last, w.strb, w.data, w.id)) } class AtosRequest(implicit p: Parameters) @@ -63,11 +66,8 @@ class AtosRequest(implicit p: Parameters) val typ = UInt(width = atosTypBits) val union = UInt(width = atosUnionBits) - def id(dummy: Int = 0) = - union(atosIdOffset + nastiXIdBits - 1, atosIdOffset) - def burst(dummy: Int = 0) = - union(atosIdOffset - 1, atosBurstOffset) + union(atosUnionBits - 1, atosBurstOffset) def size(dummy: Int = 0) = union(atosBurstOffset - 1, atosSizeOffset) @@ -76,10 +76,13 @@ class AtosRequest(implicit p: Parameters) union(atosSizeOffset - 1, atosLenOffset) def addr(dummy: Int = 0) = - union(atosLenOffset - 1, 0) + union(atosLenOffset - 1, atosAddrOffset) + + def id(dummy: Int = 0) = + union(atosIdBits - 1, 0) def data(dummy: Int = 0) = - union(nastiXDataBits - 1, 0) + union(atosStrobeOffset - 1, atosDataOffset) def strb(dummy: Int = 0) = union(atosLastOffset - 1, atosStrobeOffset) @@ -241,6 +244,7 @@ class AtosRequestDecoder(implicit p: Parameters) extends AtosModule()(p) { io.w.valid := io.req.valid && is_w io.w.bits := NastiWriteDataChannel( + id = io.req.bits.id(), data = io.req.bits.data(), strb = io.req.bits.strb(), last = io.req.bits.last()) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index a3ed9875..cc2a3a98 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -93,6 +93,7 @@ class 
NastiWriteAddressChannel(implicit p: Parameters) extends NastiAddressChann class NastiWriteDataChannel(implicit p: Parameters) extends NastiMasterToSlaveChannel()(p) with HasNastiData { + val id = UInt(width = nastiWIdBits) val strb = UInt(width = nastiWStrobeBits) val user = UInt(width = nastiWUserBits) } @@ -167,7 +168,8 @@ object NastiReadAddressChannel { } object NastiWriteDataChannel { - def apply(data: UInt, last: Bool = Bool(true))(implicit p: Parameters): NastiWriteDataChannel = { + def apply(data: UInt, last: Bool = Bool(true), id: UInt = UInt(0)) + (implicit p: Parameters): NastiWriteDataChannel = { val w = Wire(new NastiWriteDataChannel) w.strb := Fill(w.nastiWStrobeBits, UInt(1, 1)) w.data := data @@ -175,9 +177,9 @@ object NastiWriteDataChannel { w.user := UInt(0) w } - def apply(data: UInt, strb: UInt, last: Bool) + def apply(data: UInt, strb: UInt, last: Bool, id: UInt) (implicit p: Parameters): NastiWriteDataChannel = { - val w = apply(data, last) + val w = apply(data, last, id) w.strb := strb w } From 6260ad56e895fb7ad7f4f454aa6a902141d55957 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Thu, 21 Apr 2016 15:33:53 -0700 Subject: [PATCH 070/116] stop using MMIOBase and encode cacheability in address map --- junctions/src/main/scala/addrmap.scala | 45 +++++++++++++++++--------- junctions/src/main/scala/nasti.scala | 2 +- 2 files changed, 30 insertions(+), 17 deletions(-) diff --git a/junctions/src/main/scala/addrmap.scala b/junctions/src/main/scala/addrmap.scala index 5fa34483..275314b5 100644 --- a/junctions/src/main/scala/addrmap.scala +++ b/junctions/src/main/scala/addrmap.scala @@ -16,7 +16,6 @@ case object PPNBits extends Field[Int] case object VPNBits extends Field[Int] case object GlobalAddrMap extends Field[AddrMap] -case object MMIOBase extends Field[BigInt] trait HasAddrMapParameters { implicit val p: Parameters @@ -30,12 +29,12 @@ trait HasAddrMapParameters { val pgLevelBits = p(PgLevelBits) val asIdBits = p(ASIdBits) - val addrMap = new 
AddrHashMap(p(GlobalAddrMap), p(MMIOBase)) + val addrMap = new AddrHashMap(p(GlobalAddrMap)) } abstract class MemRegion { def size: BigInt } -case class MemSize(size: BigInt, prot: Int) extends MemRegion +case class MemSize(size: BigInt, prot: Int, cacheable: Boolean = false) extends MemRegion case class MemSubmap(size: BigInt, entries: AddrMap) extends MemRegion object AddrMapConsts { @@ -55,7 +54,7 @@ class AddrMapProt extends Bundle { case class AddrMapEntry(name: String, start: Option[BigInt], region: MemRegion) -case class AddrHashMapEntry(port: Int, start: BigInt, size: BigInt, prot: Int) +case class AddrHashMapEntry(port: Int, start: BigInt, size: BigInt, prot: Int, cacheable: Boolean) class AddrMap(entries: Seq[AddrMapEntry]) extends scala.collection.IndexedSeq[AddrMapEntry] { @@ -65,17 +64,19 @@ class AddrMap(entries: Seq[AddrMapEntry]) extends scala.collection.IndexedSeq[Ad def countSlaves: Int = { this map { entry: AddrMapEntry => entry.region match { - case MemSize(_, _) => 1 + case MemSize(_, _, _) => 1 case MemSubmap(_, submap) => submap.countSlaves }} reduceLeft(_ + _) } + + override def tail: AddrMap = new AddrMap(entries.tail) } object AddrMap { def apply(elems: AddrMapEntry*): AddrMap = new AddrMap(elems) } -class AddrHashMap(addrmap: AddrMap, start: BigInt) { +class AddrHashMap(addrmap: AddrMap, start: BigInt = BigInt(0)) { val mapping = new HashMap[String, AddrHashMapEntry] private def genPairs(am: AddrMap, start: BigInt): Seq[(String, AddrHashMapEntry)] = { @@ -84,18 +85,18 @@ class AddrHashMap(addrmap: AddrMap, start: BigInt) { var pairs = Seq[(String, AddrHashMapEntry)]() am.foreach { case AddrMapEntry(name, startOpt, region) => region match { - case MemSize(size, prot) => { + case MemSize(size, prot, cacheable) => { if (!startOpt.isEmpty) base = startOpt.get - pairs = (name, AddrHashMapEntry(ind, base, size, prot)) +: pairs + pairs = (name, AddrHashMapEntry(ind, base, size, prot, cacheable)) +: pairs base += size ind += 1 } case 
MemSubmap(size, submap) => { if (!startOpt.isEmpty) base = startOpt.get val subpairs = genPairs(submap, base).map { - case (subname, AddrHashMapEntry(subind, subbase, subsize, prot)) => + case (subname, AddrHashMapEntry(subind, subbase, subsize, prot, cacheable)) => (name + ":" + subname, - AddrHashMapEntry(ind + subind, subbase, subsize, prot)) + AddrHashMapEntry(ind + subind, subbase, subsize, prot, cacheable)) } pairs = subpairs ++ pairs ind += subpairs.size @@ -111,17 +112,29 @@ class AddrHashMap(addrmap: AddrMap, start: BigInt) { def nEntries: Int = mapping.size def apply(name: String): AddrHashMapEntry = mapping(name) def get(name: String): Option[AddrHashMapEntry] = mapping.get(name) - def sortedEntries(): Seq[(String, BigInt, BigInt, Int)] = { - val arr = new Array[(String, BigInt, BigInt, Int)](mapping.size) - mapping.foreach { case (name, AddrHashMapEntry(port, base, size, prot)) => - arr(port) = (name, base, size, prot) + def sortedEntries(): Seq[(String, BigInt, BigInt, Int, Boolean)] = { + val arr = new Array[(String, BigInt, BigInt, Int, Boolean)](mapping.size) + mapping.foreach { case (name, AddrHashMapEntry(port, base, size, prot, cacheable)) => + arr(port) = (name, base, size, prot, cacheable) } arr.toSeq } + def isInRegion(name: String, addr: UInt): Bool = { + val start = mapping(name).start + val size = mapping(name).size + UInt(start) <= addr && addr < UInt(start + size) + } + + def isCacheable(addr: UInt): Bool = { + sortedEntries().map { case (_, base, size, _, cacheable) => + UInt(base) <= addr && addr < UInt(base + size) && Bool(cacheable) + }.reduce(_ || _) + } + def isValid(addr: UInt): Bool = { addr < UInt(start) || sortedEntries().map { - case (_, base, size, _) => + case (_, base, size, _, _) => addr >= UInt(base) && addr < UInt(base + size) }.reduceLeft(_ || _) } @@ -129,7 +142,7 @@ class AddrHashMap(addrmap: AddrMap, start: BigInt) { def getProt(addr: UInt): AddrMapProt = { val protBits = Mux(addr < UInt(start), 
Bits(AddrMapConsts.RWX, 3), - Mux1H(sortedEntries().map { case (_, base, size, prot) => + Mux1H(sortedEntries().map { case (_, base, size, prot, _) => (addr >= UInt(base) && addr < UInt(base + size), Bits(prot, 3)) })) new AddrMapProt().fromBits(protBits) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index cc2a3a98..cbb6e729 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -534,7 +534,7 @@ class NastiRecursiveInterconnect( addrmap.zip(realAddrMap).zip(xbar.io.slaves).zipWithIndex.foreach { case (((entry, (start, size)), xbarSlave), i) => { entry.region match { - case MemSize(_, _) => + case MemSize(_, _, _) => io.slaves(slaveInd) <> xbarSlave slaveInd += 1 case MemSubmap(_, submap) => From d3dee2c6c64943d4730ce5b942211ab07617784d Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Wed, 27 Apr 2016 14:51:52 -0700 Subject: [PATCH 071/116] support countSlaves on empty address maps --- junctions/src/main/scala/addrmap.scala | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/junctions/src/main/scala/addrmap.scala b/junctions/src/main/scala/addrmap.scala index 275314b5..9cb3d681 100644 --- a/junctions/src/main/scala/addrmap.scala +++ b/junctions/src/main/scala/addrmap.scala @@ -32,10 +32,17 @@ trait HasAddrMapParameters { val addrMap = new AddrHashMap(p(GlobalAddrMap)) } -abstract class MemRegion { def size: BigInt } +abstract class MemRegion { + def size: BigInt + def numSlaves: Int +} -case class MemSize(size: BigInt, prot: Int, cacheable: Boolean = false) extends MemRegion -case class MemSubmap(size: BigInt, entries: AddrMap) extends MemRegion +case class MemSize(size: BigInt, prot: Int, cacheable: Boolean = false) extends MemRegion { + def numSlaves = 1 +} +case class MemSubmap(size: BigInt, entries: AddrMap) extends MemRegion { + val numSlaves = entries.countSlaves +} object AddrMapConsts { val R = 0x1 @@ -57,17 +64,11 @@ case class 
AddrMapEntry(name: String, start: Option[BigInt], region: MemRegion) case class AddrHashMapEntry(port: Int, start: BigInt, size: BigInt, prot: Int, cacheable: Boolean) class AddrMap(entries: Seq[AddrMapEntry]) extends scala.collection.IndexedSeq[AddrMapEntry] { - def apply(index: Int): AddrMapEntry = entries(index) def length: Int = entries.size - def countSlaves: Int = { - this map { entry: AddrMapEntry => entry.region match { - case MemSize(_, _, _) => 1 - case MemSubmap(_, submap) => submap.countSlaves - }} reduceLeft(_ + _) - } + def countSlaves: Int = entries.map(_.region.numSlaves).foldLeft(0)(_ + _) override def tail: AddrMap = new AddrMap(entries.tail) } From c8b1f0801bc6a4db5be0304252176ed17cda1500 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Wed, 27 Apr 2016 14:52:05 -0700 Subject: [PATCH 072/116] Remove start address option from AddrMapEntries It appears to never be used, and clutters things. The new invariant is that AddrMaps are relative and AddrHashMaps are absolute. 
--- junctions/src/main/scala/addrmap.scala | 34 +++++++++++--------------- junctions/src/main/scala/nasti.scala | 9 +++---- junctions/src/main/scala/util.scala | 4 --- 3 files changed, 18 insertions(+), 29 deletions(-) diff --git a/junctions/src/main/scala/addrmap.scala b/junctions/src/main/scala/addrmap.scala index 9cb3d681..64cf293e 100644 --- a/junctions/src/main/scala/addrmap.scala +++ b/junctions/src/main/scala/addrmap.scala @@ -59,7 +59,7 @@ class AddrMapProt extends Bundle { val r = Bool() } -case class AddrMapEntry(name: String, start: Option[BigInt], region: MemRegion) +case class AddrMapEntry(name: String, region: MemRegion) case class AddrHashMapEntry(port: Int, start: BigInt, size: BigInt, prot: Int, cacheable: Boolean) @@ -84,26 +84,20 @@ class AddrHashMap(addrmap: AddrMap, start: BigInt = BigInt(0)) { var ind = 0 var base = start var pairs = Seq[(String, AddrHashMapEntry)]() - am.foreach { case AddrMapEntry(name, startOpt, region) => - region match { - case MemSize(size, prot, cacheable) => { - if (!startOpt.isEmpty) base = startOpt.get - pairs = (name, AddrHashMapEntry(ind, base, size, prot, cacheable)) +: pairs - base += size - ind += 1 + am.foreach { + case AddrMapEntry(name, MemSize(size, prot, cacheable)) => + pairs = (name, AddrHashMapEntry(ind, base, size, prot, cacheable)) +: pairs + base += size + ind += 1 + case AddrMapEntry(name, MemSubmap(size, submap)) => + val subpairs = genPairs(submap, base).map { + case (subname, AddrHashMapEntry(subind, subbase, subsize, prot, cacheable)) => + (name + ":" + subname, + AddrHashMapEntry(ind + subind, subbase, subsize, prot, cacheable)) } - case MemSubmap(size, submap) => { - if (!startOpt.isEmpty) base = startOpt.get - val subpairs = genPairs(submap, base).map { - case (subname, AddrHashMapEntry(subind, subbase, subsize, prot, cacheable)) => - (name + ":" + subname, - AddrHashMapEntry(ind + subind, subbase, subsize, prot, cacheable)) - } - pairs = subpairs ++ pairs - ind += subpairs.size - base += size 
- } - } + pairs = subpairs ++ pairs + ind += subpairs.size + base += size } pairs } diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index cbb6e729..cbee05b2 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -505,21 +505,20 @@ class NastiRecursiveInterconnect( var lastEnd = base var slaveInd = 0 val levelSize = addrmap.size - val realAddrMap = new ArraySeq[(BigInt, BigInt)](addrmap.size) - addrmap.zipWithIndex.foreach { case (AddrMapEntry(name, startOpt, region), i) => - val start = startOpt.getOrElse(lastEnd) + val realAddrMap = addrmap map { case AddrMapEntry(name, region) => + val start = lastEnd val size = region.size - require(bigIntPow2(size), + require(isPow2(size), s"Region $name size $size is not a power of 2") require(start % size == 0, f"Region $name start address 0x$start%x not divisible by 0x$size%x" ) require(start >= lastEnd, f"Region $name start address 0x$start%x before previous region end") - realAddrMap(i) = (start, size) lastEnd = start + size + (start, size) } val routeSel = (addr: UInt) => { diff --git a/junctions/src/main/scala/util.scala b/junctions/src/main/scala/util.scala index 9455d9b2..23435548 100644 --- a/junctions/src/main/scala/util.scala +++ b/junctions/src/main/scala/util.scala @@ -3,10 +3,6 @@ package junctions import Chisel._ import cde.Parameters -object bigIntPow2 { - def apply(in: BigInt): Boolean = in > 0 && ((in & (in-1)) == 0) -} - class ParameterizedBundle(implicit p: Parameters) extends Bundle { override def cloneType = { try { From e4ace55d772f67ed0f7d458668a59539be99dc3d Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 28 Apr 2016 16:08:32 -0700 Subject: [PATCH 073/116] Address Map refactoring --- junctions/src/main/scala/addrmap.scala | 99 ++++++++++++++------------ junctions/src/main/scala/nasti.scala | 54 +++++--------- 2 files changed, 72 insertions(+), 81 deletions(-) diff --git a/junctions/src/main/scala/addrmap.scala 
b/junctions/src/main/scala/addrmap.scala index 64cf293e..d5ddeb95 100644 --- a/junctions/src/main/scala/addrmap.scala +++ b/junctions/src/main/scala/addrmap.scala @@ -32,25 +32,30 @@ trait HasAddrMapParameters { val addrMap = new AddrHashMap(p(GlobalAddrMap)) } +case class MemAttr(prot: Int, cacheable: Boolean = false) + abstract class MemRegion { + def align: BigInt def size: BigInt def numSlaves: Int } -case class MemSize(size: BigInt, prot: Int, cacheable: Boolean = false) extends MemRegion { +case class MemSize(size: BigInt, align: BigInt, attr: MemAttr) extends MemRegion { def numSlaves = 1 } case class MemSubmap(size: BigInt, entries: AddrMap) extends MemRegion { val numSlaves = entries.countSlaves + val align = entries.computeAlign } -object AddrMapConsts { +object AddrMapProt { val R = 0x1 val W = 0x2 val X = 0x4 val RW = R | W val RX = R | X val RWX = R | W | X + val SZ = 3 } class AddrMapProt extends Bundle { @@ -61,15 +66,21 @@ class AddrMapProt extends Bundle { case class AddrMapEntry(name: String, region: MemRegion) -case class AddrHashMapEntry(port: Int, start: BigInt, size: BigInt, prot: Int, cacheable: Boolean) +case class AddrHashMapEntry(port: Int, start: BigInt, region: MemRegion) class AddrMap(entries: Seq[AddrMapEntry]) extends scala.collection.IndexedSeq[AddrMapEntry] { + private val hash = HashMap(entries.map(e => (e.name, e.region)):_*) + def apply(index: Int): AddrMapEntry = entries(index) def length: Int = entries.size def countSlaves: Int = entries.map(_.region.numSlaves).foldLeft(0)(_ + _) + def computeSize: BigInt = new AddrHashMap(this).size + + def computeAlign: BigInt = entries.map(_.region.align).foldLeft(BigInt(1))(_ max _) + override def tail: AddrMap = new AddrMap(entries.tail) } @@ -78,68 +89,68 @@ object AddrMap { } class AddrHashMap(addrmap: AddrMap, start: BigInt = BigInt(0)) { - val mapping = new HashMap[String, AddrHashMapEntry] + private val mapping = HashMap[String, AddrHashMapEntry]() + private val subMaps = 
HashMap[String, AddrHashMapEntry]() - private def genPairs(am: AddrMap, start: BigInt): Seq[(String, AddrHashMapEntry)] = { - var ind = 0 + private def genPairs(am: AddrMap, start: BigInt, startIdx: Int, prefix: String): (BigInt, Int) = { + var ind = startIdx var base = start - var pairs = Seq[(String, AddrHashMapEntry)]() - am.foreach { - case AddrMapEntry(name, MemSize(size, prot, cacheable)) => - pairs = (name, AddrHashMapEntry(ind, base, size, prot, cacheable)) +: pairs - base += size + am.foreach { ame => + val name = prefix + ame.name + base = (base + ame.region.align - 1) / ame.region.align * ame.region.align + ame.region match { + case r: MemSize => + mapping += name -> AddrHashMapEntry(ind, base, r) + base += r.size ind += 1 - case AddrMapEntry(name, MemSubmap(size, submap)) => - val subpairs = genPairs(submap, base).map { - case (subname, AddrHashMapEntry(subind, subbase, subsize, prot, cacheable)) => - (name + ":" + subname, - AddrHashMapEntry(ind + subind, subbase, subsize, prot, cacheable)) - } - pairs = subpairs ++ pairs - ind += subpairs.size - base += size - } - pairs + case r: MemSubmap => + subMaps += name -> AddrHashMapEntry(-1, base, r) + ind = genPairs(r.entries, base, ind, name + ":")._2 + base += r.size + }} + (base, ind) } - for ((name, ind) <- genPairs(addrmap, start)) { mapping(name) = ind } + val size = genPairs(addrmap, start, 0, "")._1 - def nEntries: Int = mapping.size - def apply(name: String): AddrHashMapEntry = mapping(name) - def get(name: String): Option[AddrHashMapEntry] = mapping.get(name) - def sortedEntries(): Seq[(String, BigInt, BigInt, Int, Boolean)] = { - val arr = new Array[(String, BigInt, BigInt, Int, Boolean)](mapping.size) - mapping.foreach { case (name, AddrHashMapEntry(port, base, size, prot, cacheable)) => - arr(port) = (name, base, size, prot, cacheable) + val sortedEntries: Seq[(String, BigInt, MemSize)] = { + val arr = new Array[(String, BigInt, MemSize)](mapping.size) + mapping.foreach { case (name, 
AddrHashMapEntry(port, base, region)) => + arr(port) = (name, base, region.asInstanceOf[MemSize]) } arr.toSeq } + def nEntries: Int = mapping.size + def apply(name: String): AddrHashMapEntry = mapping.getOrElse(name, subMaps(name)) + def subMap(name: String): (BigInt, AddrMap) = { + val m = subMaps(name) + (m.start, m.region.asInstanceOf[MemSubmap].entries) + } + def isInRegion(name: String, addr: UInt): Bool = { val start = mapping(name).start - val size = mapping(name).size + val size = mapping(name).region.size UInt(start) <= addr && addr < UInt(start + size) } def isCacheable(addr: UInt): Bool = { - sortedEntries().map { case (_, base, size, _, cacheable) => - UInt(base) <= addr && addr < UInt(base + size) && Bool(cacheable) - }.reduce(_ || _) + sortedEntries.filter(_._3.attr.cacheable).map { case (_, base, region) => + UInt(base) <= addr && addr < UInt(base + region.size) + }.foldLeft(Bool(false))(_ || _) } def isValid(addr: UInt): Bool = { - addr < UInt(start) || sortedEntries().map { - case (_, base, size, _, _) => - addr >= UInt(base) && addr < UInt(base + size) - }.reduceLeft(_ || _) + sortedEntries.map { case (_, base, region) => + addr >= UInt(base) && addr < UInt(base + region.size) + }.foldLeft(Bool(false))(_ || _) } def getProt(addr: UInt): AddrMapProt = { - val protBits = Mux(addr < UInt(start), - Bits(AddrMapConsts.RWX, 3), - Mux1H(sortedEntries().map { case (_, base, size, prot, _) => - (addr >= UInt(base) && addr < UInt(base + size), Bits(prot, 3)) - })) - new AddrMapProt().fromBits(protBits) + val protForRegion = sortedEntries.map { case (_, base, region) => + val inRegion = addr >= UInt(base) && addr < UInt(base + region.size) + Mux(inRegion, UInt(region.attr.prot, AddrMapProt.SZ), UInt(0)) + } + new AddrMapProt().fromBits(protForRegion.reduce(_|_)) } } diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index cbee05b2..e9324bd3 100644 --- a/junctions/src/main/scala/nasti.scala +++ 
b/junctions/src/main/scala/nasti.scala @@ -502,53 +502,33 @@ class NastiRecursiveInterconnect( val nMasters: Int, val nSlaves: Int, addrmap: AddrMap, base: BigInt) (implicit p: Parameters) extends NastiInterconnect()(p) { - var lastEnd = base - var slaveInd = 0 val levelSize = addrmap.size - val realAddrMap = addrmap map { case AddrMapEntry(name, region) => - val start = lastEnd - val size = region.size - - require(isPow2(size), - s"Region $name size $size is not a power of 2") - require(start % size == 0, - f"Region $name start address 0x$start%x not divisible by 0x$size%x" ) - require(start >= lastEnd, - f"Region $name start address 0x$start%x before previous region end") - - lastEnd = start + size - (start, size) - } - + val addrHashMap = new AddrHashMap(addrmap, base) val routeSel = (addr: UInt) => { - Vec(realAddrMap.map { case (start, size) => - addr >= UInt(start) && addr < UInt(start + size) - }).toBits + Cat(addrmap.map { case entry => + val hashEntry = addrHashMap(entry.name) + addr >= UInt(hashEntry.start) && addr < UInt(hashEntry.start + hashEntry.region.size) + }.reverse) } val xbar = Module(new NastiCrossbar(nMasters, levelSize, routeSel)) xbar.io.masters <> io.masters - addrmap.zip(realAddrMap).zip(xbar.io.slaves).zipWithIndex.foreach { - case (((entry, (start, size)), xbarSlave), i) => { + io.slaves <> addrmap.zip(xbar.io.slaves).flatMap { + case (entry, xbarSlave) => { entry.region match { - case MemSize(_, _, _) => - io.slaves(slaveInd) <> xbarSlave - slaveInd += 1 + case _: MemSize => + Some(xbarSlave) + case MemSubmap(_, submap) if submap.isEmpty => + val err_slave = Module(new NastiErrorSlave) + err_slave.io <> xbarSlave + None case MemSubmap(_, submap) => - if (submap.isEmpty) { - val err_slave = Module(new NastiErrorSlave) - err_slave.io <> xbarSlave - } else { - val subSlaves = submap.countSlaves - val outputs = io.slaves.drop(slaveInd).take(subSlaves) - val ic = Module(new NastiRecursiveInterconnect(1, subSlaves, submap, start)) - 
ic.io.masters.head <> xbarSlave - for ((o, s) <- outputs zip ic.io.slaves) - o <> s - slaveInd += subSlaves - } + val subSlaves = submap.countSlaves + val ic = Module(new NastiRecursiveInterconnect(1, subSlaves, submap, addrHashMap(entry.name).start)) + ic.io.masters.head <> xbarSlave + ic.io.slaves } } } From be21f6962bde32ebc46fcb1a0d5d4e000d6694ff Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Mon, 2 May 2016 18:22:43 -0700 Subject: [PATCH 074/116] make GlobalAddrHashMap a config variable --- junctions/src/main/scala/addrmap.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/junctions/src/main/scala/addrmap.scala b/junctions/src/main/scala/addrmap.scala index d5ddeb95..8eb6ddd7 100644 --- a/junctions/src/main/scala/addrmap.scala +++ b/junctions/src/main/scala/addrmap.scala @@ -16,6 +16,7 @@ case object PPNBits extends Field[Int] case object VPNBits extends Field[Int] case object GlobalAddrMap extends Field[AddrMap] +case object GlobalAddrHashMap extends Field[AddrHashMap] trait HasAddrMapParameters { implicit val p: Parameters @@ -29,7 +30,7 @@ trait HasAddrMapParameters { val pgLevelBits = p(PgLevelBits) val asIdBits = p(ASIdBits) - val addrMap = new AddrHashMap(p(GlobalAddrMap)) + val addrMap = p(GlobalAddrHashMap) } case class MemAttr(prot: Int, cacheable: Boolean = false) From 64991d3947baae0dd9f57fd18c476186e683a8b0 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Thu, 28 Apr 2016 18:44:35 -0700 Subject: [PATCH 075/116] add AXI to AHB converter --- junctions/src/main/scala/hasti.scala | 86 ++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index fb38d1db..cbf1b4c6 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -251,3 +251,89 @@ class HastiSlaveToMaster(implicit p: Parameters) extends HastiModule()(p) { io.in.hreadyout := io.out.hready io.in.hresp := io.out.hresp } + +class 
HastiMasterIONastiIOConverter(implicit p: Parameters) extends HastiModule()(p) + with HasNastiParameters { + val io = new Bundle { + val nasti = new NastiIO().flip + val hasti = new HastiMasterIO + } + + require(hastiAddrBits == nastiXAddrBits) + require(hastiDataBits == nastiXDataBits) + + val s_idle :: s_read :: s_write :: s_write_resp :: Nil = Enum(Bits(), 4) + val state = Reg(init = s_idle) + + val addr = Reg(UInt(width = hastiAddrBits)) + val id = Reg(UInt(width = nastiXIdBits)) + val size = Reg(UInt(width = nastiXSizeBits)) + val len = Reg(UInt(width = nastiXLenBits)) + val data = Reg(UInt(width = nastiXDataBits)) + val first = Reg(init = Bool(false)) + val rvalid = Reg(init = Bool(false)) + + io.nasti.aw.ready := (state === s_idle) + io.nasti.ar.ready := (state === s_idle) && !io.nasti.aw.valid + io.nasti.w.ready := (state === s_write) && io.hasti.hready + io.nasti.b.valid := (state === s_write_resp) + io.nasti.b.bits := NastiWriteResponseChannel(id = id) + io.nasti.r.valid := (state === s_read) && io.hasti.hready && !first + io.nasti.r.bits := NastiReadDataChannel( + id = id, + data = io.hasti.hrdata, + last = (len === UInt(0))) + + + io.hasti.haddr := addr + io.hasti.hsize := size + io.hasti.hwrite := (state === s_write) + io.hasti.hburst := HBURST_INCR + io.hasti.hprot := UInt(0) + io.hasti.hwdata := data + io.hasti.htrans := MuxLookup(state, HTRANS_IDLE, Seq( + s_write -> Mux(io.nasti.w.valid, + Mux(first, HTRANS_NONSEQ, HTRANS_SEQ), + Mux(first, HTRANS_IDLE, HTRANS_BUSY)), + s_read -> MuxCase(HTRANS_BUSY, Seq( + first -> HTRANS_NONSEQ, + (len === UInt(0)) -> HTRANS_IDLE, + io.nasti.r.ready -> HTRANS_SEQ)))) + + when (io.nasti.aw.fire()) { + first := Bool(true) + addr := io.nasti.aw.bits.addr + id := io.nasti.aw.bits.id + size := io.nasti.aw.bits.size + state := s_write + } + + when (io.nasti.ar.fire()) { + first := Bool(true) + addr := io.nasti.ar.bits.addr + id := io.nasti.ar.bits.id + size := io.nasti.ar.bits.size + len := io.nasti.ar.bits.len + state 
:= s_read + } + + when (io.nasti.w.fire()) { + first := Bool(false) + addr := addr + (UInt(1) << size) + data := io.nasti.w.bits.data + when (io.nasti.w.bits.last) { state := s_write_resp } + } + + when (io.nasti.b.fire()) { state := s_idle } + + when (state === s_read && first) { + first := Bool(false) + addr := addr + (UInt(1) << size) + } + + when (io.nasti.r.fire()) { + addr := addr + (UInt(1) << size) + len := len - UInt(1) + when (len === UInt(0)) { state := s_idle } + } +} From 44740cb6b277240b32012a0276dcde83289c369b Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Fri, 29 Apr 2016 17:49:26 -0700 Subject: [PATCH 076/116] parameterize Hasti address and data bits --- junctions/src/main/scala/hasti.scala | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index cbf1b4c6..bf25138a 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -31,10 +31,14 @@ trait HastiConstants def dgate(valid: Bool, b: UInt) = Fill(b.getWidth, valid) & b } +case class HastiParameters(dataBits: Int, addrBits: Int) +case object HastiKey extends Field[HastiParameters] + trait HasHastiParameters { implicit val p: Parameters - val hastiAddrBits = 32 - val hastiDataBits = 32 + val hastiParams = p(HastiKey) + val hastiAddrBits = hastiParams.addrBits + val hastiDataBits = hastiParams.dataBits } abstract class HastiModule(implicit val p: Parameters) extends Module From 93447eb27468ee0bfb20d5e5bf0225bc04f35d30 Mon Sep 17 00:00:00 2001 From: "Wesley W. 
Terpstra" Date: Tue, 24 May 2016 14:06:03 -0700 Subject: [PATCH 077/116] ahb: make hasti parameters location sensitive --- junctions/src/main/scala/hasti.scala | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index bf25138a..28bd3011 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -32,11 +32,12 @@ trait HastiConstants } case class HastiParameters(dataBits: Int, addrBits: Int) -case object HastiKey extends Field[HastiParameters] +case object HastiId extends Field[String] +case class HastiKey(id: String) extends Field[HastiParameters] trait HasHastiParameters { implicit val p: Parameters - val hastiParams = p(HastiKey) + val hastiParams = p(HastiKey(p(HastiId))) val hastiAddrBits = hastiParams.addrBits val hastiDataBits = hastiParams.dataBits } From 7896c4157e859cb23e886fb66e45c0e795ac4cb8 Mon Sep 17 00:00:00 2001 From: "Wesley W. Terpstra" Date: Wed, 18 May 2016 16:10:57 -0700 Subject: [PATCH 078/116] ahb: parameterize poci --- junctions/src/main/scala/package.scala | 2 +- junctions/src/main/scala/poci.scala | 18 ++++++------------ 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/junctions/src/main/scala/package.scala b/junctions/src/main/scala/package.scala index 3181064e..317b7109 100644 --- a/junctions/src/main/scala/package.scala +++ b/junctions/src/main/scala/package.scala @@ -1 +1 @@ -package object junctions extends HastiConstants with PociConstants +package object junctions extends HastiConstants diff --git a/junctions/src/main/scala/poci.scala b/junctions/src/main/scala/poci.scala index 01993fee..976fd304 100644 --- a/junctions/src/main/scala/poci.scala +++ b/junctions/src/main/scala/poci.scala @@ -3,20 +3,14 @@ package junctions import Chisel._ import cde.{Parameters, Field} -abstract trait PociConstants +class PociIO(implicit p: Parameters) extends HastiBundle()(p) { - val SZ_PADDR = 32 - val 
SZ_PDATA = 32 -} - -class PociIO extends Bundle -{ - val paddr = UInt(OUTPUT, SZ_PADDR) + val paddr = UInt(OUTPUT, hastiAddrBits) val pwrite = Bool(OUTPUT) val psel = Bool(OUTPUT) val penable = Bool(OUTPUT) - val pwdata = UInt(OUTPUT, SZ_PDATA) - val prdata = UInt(INPUT, SZ_PDATA) + val pwdata = UInt(OUTPUT, hastiDataBits) + val prdata = UInt(INPUT, hastiDataBits) val pready = Bool(INPUT) val pslverr = Bool(INPUT) } @@ -45,7 +39,7 @@ class HastiToPociBridge(implicit p: Parameters) extends HastiModule()(p) { } } - val haddr_reg = Reg(UInt(width = SZ_PADDR)) + val haddr_reg = Reg(UInt(width = hastiAddrBits)) val hwrite_reg = Reg(UInt(width = 1)) when (transfer) { haddr_reg := io.in.haddr @@ -62,7 +56,7 @@ class HastiToPociBridge(implicit p: Parameters) extends HastiModule()(p) { io.in.hresp := io.out.pslverr } -class PociBus(amap: Seq[UInt=>Bool]) extends Module +class PociBus(amap: Seq[UInt=>Bool])(implicit p: Parameters) extends HastiModule()(p) { val io = new Bundle { val master = new PociIO().flip From 2f8a77f27aa05938007cd14e4c71ce9b45588f0b Mon Sep 17 00:00:00 2001 From: "Wesley W. 
Terpstra" Date: Wed, 18 May 2016 15:57:10 -0700 Subject: [PATCH 079/116] ahb: include all AHB-lite constants --- junctions/src/main/scala/hasti.scala | 35 ++++++++++++++++++---------- 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 28bd3011..628857d0 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -5,28 +5,39 @@ import cde.{Parameters, Field} trait HastiConstants { + // Values for htrans val SZ_HTRANS = 2 - val HTRANS_IDLE = UInt(0, SZ_HTRANS) - val HTRANS_BUSY = UInt(1, SZ_HTRANS) - val HTRANS_NONSEQ = UInt(2, SZ_HTRANS) - val HTRANS_SEQ = UInt(3, SZ_HTRANS) + val HTRANS_IDLE = UInt(0, SZ_HTRANS) // No transfer requested, not in a burst + val HTRANS_BUSY = UInt(1, SZ_HTRANS) // No transfer requested, in a burst + val HTRANS_NONSEQ = UInt(2, SZ_HTRANS) // First (potentially only) request in a burst + val HTRANS_SEQ = UInt(3, SZ_HTRANS) // Following requests in a burst + // Values for hburst val SZ_HBURST = 3 - val HBURST_SINGLE = UInt(0, SZ_HBURST) - val HBURST_INCR = UInt(1, SZ_HBURST) - val HBURST_WRAP4 = UInt(2, SZ_HBURST) - val HBURST_INCR4 = UInt(3, SZ_HBURST) - val HBURST_WRAP8 = UInt(4, SZ_HBURST) - val HBURST_INCR8 = UInt(5, SZ_HBURST) - val HBURST_WRAP16 = UInt(6, SZ_HBURST) - val HBURST_INCR16 = UInt(7, SZ_HBURST) + val HBURST_SINGLE = UInt(0, SZ_HBURST) // Single access (no burst) + val HBURST_INCR = UInt(1, SZ_HBURST) // Incrementing burst of arbitrary length, not crossing 1KB + val HBURST_WRAP4 = UInt(2, SZ_HBURST) // 4-beat wrapping burst + val HBURST_INCR4 = UInt(3, SZ_HBURST) // 4-beat incrementing burst + val HBURST_WRAP8 = UInt(4, SZ_HBURST) // 8-beat wrapping burst + val HBURST_INCR8 = UInt(5, SZ_HBURST) // 8-beat incrementing burst + val HBURST_WRAP16 = UInt(6, SZ_HBURST) // 16-beat wrapping burst + val HBURST_INCR16 = UInt(7, SZ_HBURST) // 16-beat incrementing burst + // Values for hresp val SZ_HRESP = 
1 val HRESP_OKAY = UInt(0, SZ_HRESP) val HRESP_ERROR = UInt(1, SZ_HRESP) + // Values for hsize are identical to TileLink MT_SZ + // ie: 8*2^SZ_HSIZE bit transfers val SZ_HSIZE = 3 + + // Values for hprot (a bitmask) val SZ_HPROT = 4 + def HPROT_DATA = UInt("b0001") // Data access or Opcode fetch + def HPROT_PRIVILEGED = UInt("b0010") // Privileged or User access + def HPROT_BUFFERABLE = UInt("b0100") // Bufferable or non-bufferable + def HPROT_CACHEABLE = UInt("b1000") // Cacheable or non-cacheable def dgate(valid: Bool, b: UInt) = Fill(b.getWidth, valid) & b } From ef2aae26a83d3555bb1249cf0fc883452073e71d Mon Sep 17 00:00:00 2001 From: "Wesley W. Terpstra" Date: Wed, 18 May 2016 15:59:54 -0700 Subject: [PATCH 080/116] ahb: rename hreadyout to standard hready, mark hreadyin for death --- junctions/src/main/scala/hasti.scala | 24 ++++++++++++------------ junctions/src/main/scala/poci.scala | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 628857d0..9275428c 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -86,10 +86,10 @@ class HastiSlaveIO(implicit p: Parameters) extends HastiBundle()(p) { val hwdata = Bits(INPUT, hastiDataBits) val hrdata = Bits(OUTPUT, hastiDataBits) - val hsel = Bool(INPUT) - val hreadyin = Bool(INPUT) - val hreadyout = Bool(OUTPUT) - val hresp = UInt(OUTPUT, SZ_HRESP) + val hsel = Bool(INPUT) + val hreadyin = Bool(INPUT) // !!! 
non-standard signal + val hready = Bool(OUTPUT) + val hresp = UInt(OUTPUT, SZ_HRESP) } class HastiBus(amap: Seq[UInt=>Bool])(implicit p: Parameters) extends HastiModule()(p) { @@ -137,11 +137,11 @@ class HastiBus(amap: Seq[UInt=>Bool])(implicit p: Parameters) extends HastiModul } } val s1_hsels = Array.fill(amap.size){Reg(init = Bool(false))} - val hreadyouts = io.slaves.map(_.hreadyout) - val master_hready = s1_hsels.reduce(_||_) === Bool(false) || Mux1H(s1_hsels, hreadyouts) + val hreadys = io.slaves.map(_.hready) + val master_hready = s1_hsels.reduce(_||_) === Bool(false) || Mux1H(s1_hsels, hreadys) when (master_hready) { - val skid = s1_hsels.reduce(_||_) && (hsels zip hreadyouts).map{ case (s, r) => s && !r }.reduce(_||_) + val skid = s1_hsels.reduce(_||_) && (hsels zip hreadys).map{ case (s, r) => s && !r }.reduce(_||_) skb_valid := skid when (skid) { skb_haddr := io.master.haddr @@ -185,7 +185,7 @@ class HastiSlaveMux(n: Int)(implicit p: Parameters) extends HastiModule()(p) { val s1_grants = Array.fill(n){Reg(init = Bool(true))} (s1_grants zip grants) foreach { case (g1, g) => - when (io.out.hreadyout) { g1 := g } + when (io.out.hready) { g1 := g } } def sel[T <: Data](in: Seq[T], s1: Seq[T]) = @@ -201,7 +201,7 @@ class HastiSlaveMux(n: Int)(implicit p: Parameters) extends HastiModule()(p) { io.out.hsel := grants.reduce(_||_) (io.ins zipWithIndex) map { case (in, i) => { - when (io.out.hreadyout) { + when (io.out.hready) { when (grants(i)) { skb_valid(i) := Bool(false) } @@ -222,12 +222,12 @@ class HastiSlaveMux(n: Int)(implicit p: Parameters) extends HastiModule()(p) { } } io.out.hwdata := Mux1H(s1_grants, io.ins.map(_.hwdata)) - io.out.hreadyin := io.out.hreadyout + io.out.hreadyin := io.out.hready (io.ins zipWithIndex) foreach { case (in, i) => { val g1 = s1_grants(i) in.hrdata := dgate(g1, io.out.hrdata) - in.hreadyout := io.out.hreadyout && (!skb_valid(i) || g1) + in.hready := io.out.hready && (!skb_valid(i) || g1) in.hresp := dgate(g1, io.out.hresp) } 
} } @@ -264,7 +264,7 @@ class HastiSlaveToMaster(implicit p: Parameters) extends HastiModule()(p) { io.out.hmastlock := io.in.hmastlock io.out.hwdata := io.in.hwdata io.in.hrdata := io.out.hrdata - io.in.hreadyout := io.out.hready + io.in.hready := io.out.hready io.in.hresp := io.out.hresp } diff --git a/junctions/src/main/scala/poci.scala b/junctions/src/main/scala/poci.scala index 976fd304..b4248fe2 100644 --- a/junctions/src/main/scala/poci.scala +++ b/junctions/src/main/scala/poci.scala @@ -52,7 +52,7 @@ class HastiToPociBridge(implicit p: Parameters) extends HastiModule()(p) { io.out.penable := (state === s_access) io.out.pwdata := io.in.hwdata io.in.hrdata := io.out.prdata - io.in.hreadyout := ((state === s_access) & io.out.pready) | (state === s_idle) + io.in.hready := ((state === s_access) & io.out.pready) | (state === s_idle) io.in.hresp := io.out.pslverr } From 2b37f37335fbfdd986e977baeee3eb307c6307d5 Mon Sep 17 00:00:00 2001 From: "Wesley W. Terpstra" Date: Wed, 18 May 2016 16:23:32 -0700 Subject: [PATCH 081/116] ahb: helper methods --- junctions/src/main/scala/hasti.scala | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 9275428c..50a5f822 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -72,6 +72,10 @@ class HastiMasterIO(implicit p: Parameters) extends HastiBundle()(p) { val hready = Bool(INPUT) val hresp = UInt(INPUT, SZ_HRESP) + + def isNSeq(dummy:Int=0) = htrans === HTRANS_NONSEQ // SEQ does not start a NEW request + def isHold(dummy:Int=0) = htrans === HTRANS_BUSY || htrans === HTRANS_SEQ + def isIdle(dummy:Int=0) = htrans === HTRANS_IDLE || htrans === HTRANS_BUSY } class HastiSlaveIO(implicit p: Parameters) extends HastiBundle()(p) { From 0368b6db6b61db57cb1a259a12d9611d5a8a8b0d Mon Sep 17 00:00:00 2001 From: "Wesley W. 
Terpstra" Date: Wed, 18 May 2016 13:21:12 -0700 Subject: [PATCH 082/116] ahb: replace defective crossbar with a functional one The previous crossbar had the following bugs: 1. a bursting master could be preempted the AHB-lite spec requires a slave receive the entire burst 2. a waited master could be replaced the AHB-lite spec requires haddr/etc to remain unchanged 3. hmastlock did no ensure exclusive access atomic operations could be pre-empted --- junctions/src/main/scala/hasti.scala | 215 ++++++++++++++++++++++++--- 1 file changed, 198 insertions(+), 17 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 50a5f822..5b0fb64e 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -96,6 +96,204 @@ class HastiSlaveIO(implicit p: Parameters) extends HastiBundle()(p) { val hresp = UInt(OUTPUT, SZ_HRESP) } +/* A diverted master is told hready when his address phase goes nowhere. + * In this case, we buffer his address phase request and replay it later. + * NOTE: this must optimize to nothing when divert is constantly false. 
+ */ +class MasterDiversion(implicit p: Parameters) extends HastiModule()(p) { + val io = new Bundle { + val in = (new HastiMasterIO).flip + val out = (new HastiMasterIO) + val divert = Bool(INPUT) + } + + val full = Reg(init = Bool(false)) + val buffer = Reg(new HastiMasterIO) + + when (io.out.hready) { + full := Bool(false) + } + when (io.divert) { + full := Bool(true) + buffer := io.in + } + + // If the master is diverted, he must also have been told hready + assert (!io.divert || io.in.hready); + + // Replay the request we diverted + io.out.htrans := Mux(full, buffer.htrans, io.in.htrans) + io.out.hmastlock := Mux(full, buffer.hmastlock, io.in.hmastlock) + io.out.haddr := Mux(full, buffer.haddr, io.in.haddr) + io.out.hwrite := Mux(full, buffer.hwrite, io.in.hwrite) + io.out.hburst := Mux(full, buffer.hburst, io.in.hburst) + io.out.hsize := Mux(full, buffer.hsize, io.in.hsize) + io.out.hprot := Mux(full, buffer.hprot, io.in.hprot) + io.out.hwdata := Mux(full, buffer.hwdata, io.in.hwdata) + + // Pass slave response back + io.in.hrdata := io.out.hrdata + io.in.hresp := io.out.hresp + io.in.hready := io.out.hready && !full // Block master while we steal his address phase +} + +/* Masters with lower index have priority over higher index masters. + * However, a lower priority master will retain control of a slave when EITHER: + * 1. a burst is in progress (switching slaves mid-burst violates AHB-lite at slave) + * 2. a transfer was waited (the standard forbids changing requests in this case) + * + * If a master raises hmastlock, it will be waited until no other master has inflight + * requests; then, it acquires exclusive control of the crossbar until hmastlock is low. + * + * To implement an AHB-lite crossbar, it is important to realize that requests and + * responses are coupled. 
Unlike modern bus protocols where the response data has flow + * control independent of the request data, in AHB-lite, both flow at the same time at + * the sole discretion of the slave via the hready signal. The address and data are + * delivered on two back-to-back cycles, the so-called address and data phases. + * + * Masters can only be connected to a single slave at a time. If a master had two different + * slave connections on the address and data phases, there would be two independent hready + * signals. An AHB-lite slave can assume that data flows when it asserts hready. If the data + * slave deasserts hready while the address slave asserts hready, the master is put in the + * impossible position of being in data phase on two slaves at once. For this reason, when + * a master issues back-to-back accesses to distinct slaves, we inject a pipeline bubble + * between the two requests to limit the master to just a single slave at a time. + * + * Conversely, a slave CAN have two masters attached to it. This is unproblematic, because + * the only signal which governs data flow is hready. Thus, both masters can be stalled + * safely by the single slave. + */ +class HastiXbar(nMasters: Int, addressMap: Seq[UInt=>Bool])(implicit p: Parameters) extends HastiModule()(p) { + val io = new Bundle { + val masters = Vec(nMasters, new HastiMasterIO).flip + val slaves = Vec(addressMap.size, new HastiSlaveIO).flip + } + + val nSlaves = addressMap.size + // !!! 
handle hmastlock + + // Setup diversions infront of each master + val diversions = Seq.tabulate(nMasters) { m => Module(new MasterDiversion) } + (io.masters zip diversions) foreach { case (m, d) => d.io.in <> m } + + // Handy short-hand + val masters = diversions map (_.io.out) + val slaves = io.slaves + + // This matrix governs the master-slave connections in the address phase + // It is indexed by addressPhaseGrantSM(slave)(master) + // It is guaranteed to have at most one 'true' per column and per row + val addressPhaseGrantSM = Wire(Vec(nSlaves, Vec(nMasters, Bool()))) + // This matrix governs the master-slave connections in the data phase + // It is guaranteed to have at most one 'true' per column and per row + val dataPhaseGrantSM = Reg (init = Vec.fill(nSlaves)(Vec.fill(nMasters)(Bool(false)))) + // This matrix is the union of the address and data phases. + // It is transposed with respect to the two previous matrices. + // It is guaranteed to contain at most one 'true' per master row. + // However, two 'true's per slave column are permitted. + val unionGrantMS = Vec.tabulate(nMasters) { m => Vec.tabulate(nSlaves) { s => + addressPhaseGrantSM(s)(m) || dataPhaseGrantSM(s)(m) } } + + // Confirm the guarantees made above + def justOnce(v: Vec[Bool]) = v.fold(Bool(false)) { case (p, v) => + assert (!p || !v) + p || v + } + addressPhaseGrantSM foreach { s => justOnce(s) } + unionGrantMS foreach { s => justOnce(s) } + + // Data phase follows address phase whenever the slave is ready + (slaves zip (dataPhaseGrantSM zip addressPhaseGrantSM)) foreach { case (s, (d, a)) => + when (s.hready) { d := a } + } + + // Record the grant state from the previous cycle; needed in case we hold access + val priorAddressPhaseGrantSM = RegNext(addressPhaseGrantSM) + + // If a master says BUSY or SEQ, it is in the middle of a burst. + // In this case, it MUST stay attached to the same slave as before. 
+ // Otherwise, it would violate the AHB-lite specification as seen by + // the slave, which is guaranteed a complete burst of the promised length. + // One case where this matters is preventing preemption of low-prio masters. + // NOTE: this exposes a slave to bad addresses when a master is buggy + val holdBurstM = Vec(masters map { _.isHold() }) + + // Transform the burst hold requirement from master indexing to slave indexing + // We use the previous cycle's binding because the master continues the prior burst + val holdBurstS = Vec(priorAddressPhaseGrantSM map { m => Mux1H(m, holdBurstM) }) + + // If a slave says !hready to a request, it must retain the same master next cycle. + // The AHB-lite specification requires that a waited transfer remain unchanged. + // If we preempted a waited master, the new master's request could potentially differ. + val holdBusyS = RegNext(Vec(slaves map { s => !s.hready && s.hsel })) + + // Combine the above two grounds to determine if the slave retains its prior master + val holdS = Vec((holdBurstS zip holdBusyS) map ({ case (a,b) => a||b })) + + // Determine which master addresses match which slaves + val matchMS = Vec(masters map { m => Vec(addressMap map { afn => afn(m.haddr) }) }) + // Detect requests to nowhere; we need to allow progress in this case + val nowhereM = Vec(matchMS map { s => !s.reduce(_ || _) }) + + // Detect if we need to inject a pipeline bubble between the master requests. + // Divert masters already granted a data phase different from next request. 
+ // NOTE: if only one slave, matchMS is always true => bubble always false + // => the diversion registers are optimized away as they are unread + // NOTE: bubble => dataPhase => have an hready signal + val bubbleM = + Vec.tabulate(nMasters) { m => + Vec.tabulate(nSlaves) { s => dataPhaseGrantSM(s)(m) && !matchMS(m)(s) } + .reduce(_ || _) } + + // Requested access to slaves from masters (pre-arbitration) + // NOTE: isNSeq does NOT include SEQ; thus, masters who are midburst do not + // request access to a new slave. They stay tied to the old and do not get two. + // NOTE: if a master was waited, it must repeat the same request as last cycle; + // thus, it will request the same slave and not end up with two (unless buggy). + val NSeq = Vec(masters.map(_.isNSeq())) + val requestSM = Vec.tabulate(nSlaves) { s => Vec.tabulate(nMasters) { m => matchMS(m)(s) && NSeq(m) && !bubbleM(m) } } + + // Select at most one master request per slave (lowest index = highest priority) + val selectedRequestSM = Vec(requestSM map { m => Vec(PriorityEncoderOH(m)) }) + + // Calculate new crossbar interconnect state + addressPhaseGrantSM := Vec((holdS zip (priorAddressPhaseGrantSM zip selectedRequestSM)) + map { case (h, (p, r)) => Mux(h, p, r) }) + + // If we diverted a master, we need to absorb his address phase to replay later + for (m <- 0 until nMasters) { + diversions(m).io.divert := bubbleM(m) && NSeq(m) && masters(m).hready + } + + // Master muxes (address and data phase are the same) + (masters zip (unionGrantMS zip nowhereM)) foreach { case (m, (g, n)) => { + // If the master is connected to a slave, the slave determines hready. 
+ // However, if no slave is connected, for progress report ready anyway, if: + // bad address (swallow request) OR idle (permit stupid slaves to move FSM) + val autoready = n || m.isIdle() + m.hready := Mux1H(g, slaves.map(_.hready ^ autoready)) ^ autoready + m.hrdata := Mux1H(g, slaves.map(_.hrdata)) + m.hresp := Mux1H(g, slaves.map(_.hresp)) + } } + + // Slave address phase muxes + (slaves zip addressPhaseGrantSM) foreach { case (s, g) => { + s.htrans := Mux1H(g, masters.map(_.htrans)) // defaults to HTRANS_IDLE (0) + s.haddr := Mux1H(g, masters.map(_.haddr)) + s.hmastlock := Mux1H(g, masters.map(_.hmastlock)) // !!! use global crossbar lock state + s.hwrite := Mux1H(g, masters.map(_.hwrite)) + s.hsize := Mux1H(g, masters.map(_.hsize)) + s.hburst := Mux1H(g, masters.map(_.hburst)) + s.hprot := Mux1H(g, masters.map(_.hprot)) + s.hsel := g.reduce(_ || _) + } } + + // Slave data phase muxes + (slaves zip dataPhaseGrantSM) foreach { case (s, g) => { + s.hwdata := Mux1H(g, masters.map(_.hwdata)) + } } +} + class HastiBus(amap: Seq[UInt=>Bool])(implicit p: Parameters) extends HastiModule()(p) { val io = new Bundle { val master = new HastiMasterIO().flip @@ -236,23 +434,6 @@ class HastiSlaveMux(n: Int)(implicit p: Parameters) extends HastiModule()(p) { } } } -class HastiXbar(nMasters: Int, addressMap: Seq[UInt=>Bool]) - (implicit p: Parameters) extends HastiModule()(p) { - val io = new Bundle { - val masters = Vec(nMasters, new HastiMasterIO).flip - val slaves = Vec(addressMap.size, new HastiSlaveIO).flip - } - - val buses = List.fill(nMasters){Module(new HastiBus(addressMap))} - val muxes = List.fill(addressMap.size){Module(new HastiSlaveMux(nMasters))} - - (buses.map(b => b.io.master) zip io.masters) foreach { case (b, m) => b <> m } - (muxes.map(m => m.io.out) zip io.slaves ) foreach { case (x, s) => x <> s } - for (m <- 0 until nMasters; s <- 0 until addressMap.size) yield { - buses(m).io.slaves(s) <> muxes(s).io.ins(m) - } -} - class HastiSlaveToMaster(implicit p: 
Parameters) extends HastiModule()(p) { val io = new Bundle { val in = new HastiSlaveIO From f30f8d9f79631d45f3ecb568d81cfc3fbac9ea6b Mon Sep 17 00:00:00 2001 From: "Wesley W. Terpstra" Date: Wed, 18 May 2016 16:31:06 -0700 Subject: [PATCH 083/116] ahb: reduce obsolete degenerate cases of a crossbar --- junctions/src/main/scala/hasti.scala | 133 ++------------------------- 1 file changed, 8 insertions(+), 125 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 5b0fb64e..19726cf1 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -300,69 +300,9 @@ class HastiBus(amap: Seq[UInt=>Bool])(implicit p: Parameters) extends HastiModul val slaves = Vec(amap.size, new HastiSlaveIO).flip } - // skid buffer - val skb_valid = Reg(init = Bool(false)) - val skb_haddr = Reg(UInt(width = hastiAddrBits)) - val skb_hwrite = Reg(Bool()) - val skb_hsize = Reg(UInt(width = SZ_HSIZE)) - val skb_hburst = Reg(UInt(width = SZ_HBURST)) - val skb_hprot = Reg(UInt(width = SZ_HPROT)) - val skb_htrans = Reg(UInt(width = SZ_HTRANS)) - val skb_hmastlock = Reg(Bool()) - val skb_hwdata = Reg(UInt(width = hastiDataBits)) - - val master_haddr = Mux(skb_valid, skb_haddr, io.master.haddr) - val master_hwrite = Mux(skb_valid, skb_hwrite, io.master.hwrite) - val master_hsize = Mux(skb_valid, skb_hsize, io.master.hsize) - val master_hburst = Mux(skb_valid, skb_hburst, io.master.hburst) - val master_hprot = Mux(skb_valid, skb_hprot, io.master.hprot) - val master_htrans = Mux(skb_valid, skb_htrans, io.master.htrans) - val master_hmastlock = Mux(skb_valid, skb_hmastlock, io.master.hmastlock) - val master_hwdata = Mux(skb_valid, skb_hwdata, io.master.hwdata) - - val hsels = PriorityEncoderOH( - (io.slaves zip amap) map { case (s, afn) => { - s.haddr := master_haddr - s.hwrite := master_hwrite - s.hsize := master_hsize - s.hburst := master_hburst - s.hprot := master_hprot - s.htrans := master_htrans - s.hmastlock 
:= master_hmastlock - s.hwdata := master_hwdata - afn(master_haddr) && master_htrans.orR - }}) - - (io.slaves zip hsels) foreach { case (s, hsel) => { - s.hsel := hsel - s.hreadyin := skb_valid || io.master.hready - } } - - val s1_hsels = Array.fill(amap.size){Reg(init = Bool(false))} - val hreadys = io.slaves.map(_.hready) - val master_hready = s1_hsels.reduce(_||_) === Bool(false) || Mux1H(s1_hsels, hreadys) - - when (master_hready) { - val skid = s1_hsels.reduce(_||_) && (hsels zip hreadys).map{ case (s, r) => s && !r }.reduce(_||_) - skb_valid := skid - when (skid) { - skb_haddr := io.master.haddr - skb_hwrite := io.master.hwrite - skb_hsize := io.master.hsize - skb_hburst := io.master.hburst - skb_hprot := io.master.hprot - skb_htrans := io.master.htrans - skb_hmastlock := io.master.hmastlock - } - - (s1_hsels zip hsels) foreach { case (s1, s) => - s1 := s - } - } - - io.master.hready := !skb_valid && master_hready - io.master.hrdata := Mux1H(s1_hsels, io.slaves.map(_.hrdata)) - io.master.hresp := Mux1H(s1_hsels, io.slaves.map(_.hresp)) + val bar = Module(new HastiXbar(1, amap)) + io.master <> bar.io.masters(0) + io.slaves <> bar.io.slaves } class HastiSlaveMux(n: Int)(implicit p: Parameters) extends HastiModule()(p) { @@ -370,68 +310,11 @@ class HastiSlaveMux(n: Int)(implicit p: Parameters) extends HastiModule()(p) { val ins = Vec(n, new HastiSlaveIO) val out = new HastiSlaveIO().flip } - - // skid buffers - val skb_valid = Array.fill(n){Reg(init = Bool(false))} - val skb_haddr = Array.fill(n){Reg(UInt(width = hastiAddrBits))} - val skb_hwrite = Array.fill(n){Reg(Bool())} - val skb_hsize = Array.fill(n){Reg(UInt(width = SZ_HSIZE))} - val skb_hburst = Array.fill(n){Reg(UInt(width = SZ_HBURST))} - val skb_hprot = Array.fill(n){Reg(UInt(width = SZ_HPROT))} - val skb_htrans = Array.fill(n){Reg(UInt(width = SZ_HTRANS))} - val skb_hmastlock = Array.fill(n){Reg(Bool())} - - val requests = (io.ins zip skb_valid) map { case (in, v) => in.hsel && in.hreadyin || v } - 
val grants = PriorityEncoderOH(requests) - - val s1_grants = Array.fill(n){Reg(init = Bool(true))} - - (s1_grants zip grants) foreach { case (g1, g) => - when (io.out.hready) { g1 := g } - } - - def sel[T <: Data](in: Seq[T], s1: Seq[T]) = - Vec((skb_valid zip s1 zip in) map { case ((v, s), in) => Mux(v, s, in) }) - - io.out.haddr := Mux1H(grants, sel(io.ins.map(_.haddr), skb_haddr)) - io.out.hwrite := Mux1H(grants, sel(io.ins.map(_.hwrite), skb_hwrite)) - io.out.hsize := Mux1H(grants, sel(io.ins.map(_.hsize), skb_hsize)) - io.out.hburst := Mux1H(grants, sel(io.ins.map(_.hburst), skb_hburst)) - io.out.hprot := Mux1H(grants, sel(io.ins.map(_.hprot), skb_hprot)) - io.out.htrans := Mux1H(grants, sel(io.ins.map(_.htrans), skb_htrans)) - io.out.hmastlock := Mux1H(grants, sel(io.ins.map(_.hmastlock), skb_hmastlock)) - io.out.hsel := grants.reduce(_||_) - - (io.ins zipWithIndex) map { case (in, i) => { - when (io.out.hready) { - when (grants(i)) { - skb_valid(i) := Bool(false) - } - when (!grants(i) && !skb_valid(i)) { - val valid = in.hsel && in.hreadyin - skb_valid(i) := valid - when (valid) { // clock-gate - skb_haddr(i) := in.haddr - skb_hwrite(i) := in.hwrite - skb_hsize(i) := in.hsize - skb_hburst(i) := in.hburst - skb_hprot(i) := in.hprot - skb_htrans(i) := in.htrans - skb_hmastlock(i) := in.hmastlock - } - } - } - } } - - io.out.hwdata := Mux1H(s1_grants, io.ins.map(_.hwdata)) - io.out.hreadyin := io.out.hready - - (io.ins zipWithIndex) foreach { case (in, i) => { - val g1 = s1_grants(i) - in.hrdata := dgate(g1, io.out.hrdata) - in.hready := io.out.hready && (!skb_valid(i) || g1) - in.hresp := dgate(g1, io.out.hresp) - } } + + val amap = Seq({ (_:UInt) => Bool(true)}) + val bar = Module(new HastiXbar(n, amap)) + io.ins <> bar.io.masters + io.out <> bar.io.slaves(0) } class HastiSlaveToMaster(implicit p: Parameters) extends HastiModule()(p) { From 15cad8414da11d3b2b49ed3d8328285cdcac5458 Mon Sep 17 00:00:00 2001 From: "Wesley W. 
Terpstra" Date: Wed, 18 May 2016 16:45:15 -0700 Subject: [PATCH 084/116] ahb: put signals in the order they appear in signal traces in the spec --- junctions/src/main/scala/hasti.scala | 42 ++++++++++++++-------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 19726cf1..cf5a319b 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -59,16 +59,16 @@ abstract class HastiBundle(implicit val p: Parameters) extends ParameterizedBund with HasHastiParameters class HastiMasterIO(implicit p: Parameters) extends HastiBundle()(p) { - val haddr = UInt(OUTPUT, hastiAddrBits) - val hwrite = Bool(OUTPUT) - val hsize = UInt(OUTPUT, SZ_HSIZE) - val hburst = UInt(OUTPUT, SZ_HBURST) - val hprot = UInt(OUTPUT, SZ_HPROT) val htrans = UInt(OUTPUT, SZ_HTRANS) val hmastlock = Bool(OUTPUT) + val haddr = UInt(OUTPUT, hastiAddrBits) + val hwrite = Bool(OUTPUT) + val hburst = UInt(OUTPUT, SZ_HBURST) + val hsize = UInt(OUTPUT, SZ_HSIZE) + val hprot = UInt(OUTPUT, SZ_HPROT) val hwdata = Bits(OUTPUT, hastiDataBits) - val hrdata = Bits(INPUT, hastiDataBits) + val hrdata = Bits(INPUT, hastiDataBits) val hready = Bool(INPUT) val hresp = UInt(INPUT, SZ_HRESP) @@ -79,15 +79,15 @@ class HastiMasterIO(implicit p: Parameters) extends HastiBundle()(p) { } class HastiSlaveIO(implicit p: Parameters) extends HastiBundle()(p) { - val haddr = UInt(INPUT, hastiAddrBits) - val hwrite = Bool(INPUT) - val hsize = UInt(INPUT, SZ_HSIZE) - val hburst = UInt(INPUT, SZ_HBURST) - val hprot = UInt(INPUT, SZ_HPROT) val htrans = UInt(INPUT, SZ_HTRANS) val hmastlock = Bool(INPUT) + val haddr = UInt(INPUT, hastiAddrBits) + val hwrite = Bool(INPUT) + val hburst = UInt(INPUT, SZ_HBURST) + val hsize = UInt(INPUT, SZ_HSIZE) + val hprot = UInt(INPUT, SZ_HPROT) - val hwdata = Bits(INPUT, hastiDataBits) + val hwdata = Bits(INPUT, hastiDataBits) val hrdata = Bits(OUTPUT, hastiDataBits) val 
hsel = Bool(INPUT) @@ -319,21 +319,21 @@ class HastiSlaveMux(n: Int)(implicit p: Parameters) extends HastiModule()(p) { class HastiSlaveToMaster(implicit p: Parameters) extends HastiModule()(p) { val io = new Bundle { - val in = new HastiSlaveIO + val in = new HastiSlaveIO val out = new HastiMasterIO } - io.out.haddr := io.in.haddr - io.out.hwrite := io.in.hwrite - io.out.hsize := io.in.hsize - io.out.hburst := io.in.hburst - io.out.hprot := io.in.hprot - io.out.htrans := Mux(io.in.hsel && io.in.hreadyin, io.in.htrans, HTRANS_IDLE) + io.out.htrans := Mux(io.in.hsel && io.in.hreadyin, io.in.htrans, HTRANS_IDLE) io.out.hmastlock := io.in.hmastlock - io.out.hwdata := io.in.hwdata + io.out.haddr := io.in.haddr + io.out.hwrite := io.in.hwrite + io.out.hburst := io.in.hburst + io.out.hsize := io.in.hsize + io.out.hprot := io.in.hprot + io.out.hwdata := io.in.hwdata io.in.hrdata := io.out.hrdata io.in.hready := io.out.hready - io.in.hresp := io.out.hresp + io.in.hresp := io.out.hresp } class HastiMasterIONastiIOConverter(implicit p: Parameters) extends HastiModule()(p) From 1db40687c6e55a3205ec81e6a2ce194ce1ad3e3c Mon Sep 17 00:00:00 2001 From: "Wesley W. Terpstra" Date: Wed, 18 May 2016 16:46:28 -0700 Subject: [PATCH 085/116] ahb: eliminate now-unnecesary non-standard hreadyin --- junctions/src/main/scala/hasti.scala | 3 +-- junctions/src/main/scala/poci.scala | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index cf5a319b..ed35d253 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -91,7 +91,6 @@ class HastiSlaveIO(implicit p: Parameters) extends HastiBundle()(p) { val hrdata = Bits(OUTPUT, hastiDataBits) val hsel = Bool(INPUT) - val hreadyin = Bool(INPUT) // !!! 
non-standard signal val hready = Bool(OUTPUT) val hresp = UInt(OUTPUT, SZ_HRESP) } @@ -323,7 +322,7 @@ class HastiSlaveToMaster(implicit p: Parameters) extends HastiModule()(p) { val out = new HastiMasterIO } - io.out.htrans := Mux(io.in.hsel && io.in.hreadyin, io.in.htrans, HTRANS_IDLE) + io.out.htrans := Mux(io.in.hsel, io.in.htrans, HTRANS_IDLE) io.out.hmastlock := io.in.hmastlock io.out.haddr := io.in.haddr io.out.hwrite := io.in.hwrite diff --git a/junctions/src/main/scala/poci.scala b/junctions/src/main/scala/poci.scala index b4248fe2..ac089164 100644 --- a/junctions/src/main/scala/poci.scala +++ b/junctions/src/main/scala/poci.scala @@ -23,7 +23,7 @@ class HastiToPociBridge(implicit p: Parameters) extends HastiModule()(p) { val s_idle :: s_setup :: s_access :: Nil = Enum(UInt(), 3) val state = Reg(init = s_idle) - val transfer = io.in.hsel & io.in.hreadyin & io.in.htrans(1) + val transfer = io.in.hsel & io.in.htrans(1) switch (state) { is (s_idle) { From e1e8eda41900ab621eb076a698c402128c6f4a1a Mon Sep 17 00:00:00 2001 From: "Wesley W. 
Terpstra" Date: Tue, 24 May 2016 14:06:57 -0700 Subject: [PATCH 086/116] ahb: add a test SRAM --- junctions/src/main/scala/hasti.scala | 85 ++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index ed35d253..3bb58347 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -51,6 +51,8 @@ trait HasHastiParameters { val hastiParams = p(HastiKey(p(HastiId))) val hastiAddrBits = hastiParams.addrBits val hastiDataBits = hastiParams.dataBits + val hastiDataBytes = hastiDataBits/8 + val hastiAlignment = log2Up(hastiDataBytes) } abstract class HastiModule(implicit val p: Parameters) extends Module @@ -420,3 +422,86 @@ class HastiMasterIONastiIOConverter(implicit p: Parameters) extends HastiModule( when (len === UInt(0)) { state := s_idle } } } + +class HastiTestSRAM(depth: Int)(implicit p: Parameters) extends HastiModule()(p) { + val io = new HastiSlaveIO + + // This is a test SRAM with random delays + val ready = LFSR16(Bool(true))(0) // Bool(true) + + // Calculate the bitmask of which bytes are being accessed + val mask_decode = Vec.tabulate(hastiAlignment+1) (UInt(_) <= io.hsize) + val mask_wide = Vec.tabulate(hastiDataBytes) { i => mask_decode(log2Up(i+1)) } + val mask_shift = mask_wide.toBits().asUInt() << io.haddr(hastiAlignment-1,0) + + // The request had better have been aligned! 
(AHB-lite requires this) + assert ((io.haddr & mask_decode.toBits()(hastiAlignment,1).asUInt) === UInt(0)) + + // The mask and address during the address phase + val a_request = io.hsel && (io.htrans === HTRANS_NONSEQ || io.htrans === HTRANS_SEQ) + val a_mask = mask_shift(hastiDataBytes-1, 0) + val a_address = io.haddr >> UInt(hastiAlignment) + val a_write = io.hwrite + + // The data phase signals + val d_read = RegEnable(a_request && !a_write, Bool(false), ready) + val d_mask = RegEnable(a_mask, ready && a_request) + val d_wdata = Vec.tabulate(hastiDataBytes) { i => io.hwdata(8*(i+1)-1, 8*i) } + + // AHB writes must occur during the data phase; this poses a structural + // hazard with reads which must occur during the address phase. To solve + // this problem, we delay the writes until there is a free cycle. + // + // The idea is to record the address information from address phase and + // then as soon as possible flush the pending write. This cannot be done + // on a cycle when there is an address phase read, but on any other cycle + // the write will execute. In the case of reads following a write, the + // result must bypass data from the pending write into the read if they + // happen to have matching address. + + // Remove this once HoldUnless is in chisel3 + def holdUnless[T <: Data](in : T, enable: Bool): T = Mux(!enable, RegEnable(in, enable), in) + + // Pending write? 
+ val p_valid = RegInit(Bool(false)) + val p_address = Reg(a_address) + val p_mask = Reg(a_mask) + val p_latch_d = RegNext(ready && a_request && a_write, Bool(false)) + val p_wdata = holdUnless(d_wdata, p_latch_d) + + // Use single-ported memory with byte-write enable + val mem = SeqMem(depth, Vec(hastiDataBytes, Bits(width = 8))) + + // Decide is the SRAM port is used for reading or (potentially) writing + val read = ready && a_request && !a_write + // In case we are stalled, we need to hold the read data + val d_rdata = holdUnless(mem.read(a_address, read), RegNext(read)) + // Whenever the port is not needed for reading, execute pending writes + when (!read) { + when (p_valid) { mem.write(p_address, p_wdata, p_mask.toBools) } + p_valid := Bool(false) + } + + // Record the request for later? + when (ready && a_request && a_write) { + p_valid := Bool(true) + p_address := a_address + p_mask := a_mask + } + + // Does the read need to be muxed with the previous write? + val a_bypass = a_address === p_address && p_valid + val d_bypass = RegEnable(a_bypass, ready && a_request) + + // Mux in data from the pending write + val muxdata = Vec((p_mask.toBools zip (p_wdata zip d_rdata)) + map { case (m, (p, r)) => Mux(d_bypass && m, p, r) }) + // Wipe out any data the master should not see (for testing) + val outdata = Vec((d_mask.toBools zip muxdata) + map { case (m, p) => Mux(d_read && ready && m, p, Bits(0)) }) + + // Finally, the outputs + io.hrdata := outdata.toBits() + io.hready := ready + io.hresp := HRESP_OKAY +} From 200c69c106c6b3ab9e28008e4ec6142198996fca Mon Sep 17 00:00:00 2001 From: "Wesley W. 
Terpstra" Date: Tue, 24 May 2016 13:28:06 -0700 Subject: [PATCH 087/116] ahb: support hmastlock acquistion of crossbar --- junctions/src/main/scala/hasti.scala | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 3bb58347..e2d47ed1 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -171,7 +171,6 @@ class HastiXbar(nMasters: Int, addressMap: Seq[UInt=>Bool])(implicit p: Paramete } val nSlaves = addressMap.size - // !!! handle hmastlock // Setup diversions infront of each master val diversions = Seq.tabulate(nMasters) { m => Module(new MasterDiversion) } @@ -181,6 +180,10 @@ class HastiXbar(nMasters: Int, addressMap: Seq[UInt=>Bool])(implicit p: Paramete val masters = diversions map (_.io.out) val slaves = io.slaves + // Lock status of the crossbar + val lockedM = Reg(init = Vec.fill(nMasters)(Bool(false))) + val isLocked = lockedM.reduce(_ || _) + // This matrix governs the master-slave connections in the address phase // It is indexed by addressPhaseGrantSM(slave)(master) // It is guaranteed to have at most one 'true' per column and per row @@ -247,11 +250,12 @@ class HastiXbar(nMasters: Int, addressMap: Seq[UInt=>Bool])(implicit p: Paramete .reduce(_ || _) } // Requested access to slaves from masters (pre-arbitration) + // NOTE: quash any request that requires bus ownership or conflicts with isLocked // NOTE: isNSeq does NOT include SEQ; thus, masters who are midburst do not // request access to a new slave. They stay tied to the old and do not get two. // NOTE: if a master was waited, it must repeat the same request as last cycle; // thus, it will request the same slave and not end up with two (unless buggy). 
- val NSeq = Vec(masters.map(_.isNSeq())) + val NSeq = Vec((lockedM zip masters) map { case(l, m) => m.isNSeq() && ((!isLocked && !m.hmastlock) || l) }) val requestSM = Vec.tabulate(nSlaves) { s => Vec.tabulate(nMasters) { m => matchMS(m)(s) && NSeq(m) && !bubbleM(m) } } // Select at most one master request per slave (lowest index = highest priority) @@ -281,7 +285,7 @@ class HastiXbar(nMasters: Int, addressMap: Seq[UInt=>Bool])(implicit p: Paramete (slaves zip addressPhaseGrantSM) foreach { case (s, g) => { s.htrans := Mux1H(g, masters.map(_.htrans)) // defaults to HTRANS_IDLE (0) s.haddr := Mux1H(g, masters.map(_.haddr)) - s.hmastlock := Mux1H(g, masters.map(_.hmastlock)) // !!! use global crossbar lock state + s.hmastlock := isLocked s.hwrite := Mux1H(g, masters.map(_.hwrite)) s.hsize := Mux1H(g, masters.map(_.hsize)) s.hburst := Mux1H(g, masters.map(_.hburst)) @@ -293,6 +297,20 @@ class HastiXbar(nMasters: Int, addressMap: Seq[UInt=>Bool])(implicit p: Paramete (slaves zip dataPhaseGrantSM) foreach { case (s, g) => { s.hwdata := Mux1H(g, masters.map(_.hwdata)) } } + + // When no master-slave connections are active, a master can take-over the bus + val canLock = !addressPhaseGrantSM.map({ v => v.reduce(_ || _) }).reduce(_ || _) + + // Lowest index highest priority for lock arbitration + val reqLock = masters.map(_.hmastlock) + val winLock = PriorityEncoderOH(reqLock) + + // Lock arbitration + when (isLocked) { + lockedM := (lockedM zip reqLock) map { case (a,b) => a && b } + } .elsewhen (canLock) { + lockedM := winLock + } } class HastiBus(amap: Seq[UInt=>Bool])(implicit p: Parameters) extends HastiModule()(p) { From b921bae1070a0ba0054aefa73b3996ac104f286f Mon Sep 17 00:00:00 2001 From: "Wesley W. 
Terpstra" Date: Tue, 24 May 2016 14:14:57 -0700 Subject: [PATCH 088/116] ahb: eliminate trait abused for constants --- junctions/src/main/scala/hasti.scala | 4 +++- junctions/src/main/scala/package.scala | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index e2d47ed1..53637419 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -3,7 +3,7 @@ package junctions import Chisel._ import cde.{Parameters, Field} -trait HastiConstants +object HastiConstants { // Values for htrans val SZ_HTRANS = 2 @@ -42,6 +42,8 @@ trait HastiConstants def dgate(valid: Bool, b: UInt) = Fill(b.getWidth, valid) & b } +import HastiConstants._ + case class HastiParameters(dataBits: Int, addrBits: Int) case object HastiId extends Field[String] case class HastiKey(id: String) extends Field[HastiParameters] diff --git a/junctions/src/main/scala/package.scala b/junctions/src/main/scala/package.scala index 317b7109..214a91e5 100644 --- a/junctions/src/main/scala/package.scala +++ b/junctions/src/main/scala/package.scala @@ -1 +1 @@ -package object junctions extends HastiConstants +package object junctions From a9599302bd312ee6230f396265fb7b433bdac35c Mon Sep 17 00:00:00 2001 From: Donggyu Date: Tue, 24 May 2016 17:10:17 -0700 Subject: [PATCH 089/116] fix cloneType in nasti.scala (#14) --- junctions/src/main/scala/nasti.scala | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index e9324bd3..066ab35d 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -262,12 +262,16 @@ class MemIONastiIOConverter(cacheBlockOffsetBits: Int)(implicit p: Parameters) e io.mem.resp.ready := io.nasti.r.ready } +class NastiArbiterIO(arbN: Int)(implicit p: Parameters) extends Bundle { + val master = Vec(arbN, new NastiIO).flip + 
val slave = new NastiIO + override def cloneType = + new NastiArbiterIO(arbN).asInstanceOf[this.type] +} + /** Arbitrate among arbN masters requesting to a single slave */ class NastiArbiter(val arbN: Int)(implicit p: Parameters) extends NastiModule { - val io = new Bundle { - val master = Vec(arbN, new NastiIO).flip - val slave = new NastiIO - } + val io = new NastiArbiterIO(arbN) if (arbN > 1) { val arbIdBits = log2Up(arbN) @@ -381,6 +385,13 @@ class NastiErrorSlave(implicit p: Parameters) extends NastiModule { b_queue.io.deq.ready := io.b.ready && !draining } +class NastiRouterIO(nSlaves: Int)(implicit p: Parameters) extends Bundle { + val master = (new NastiIO).flip + val slave = Vec(nSlaves, new NastiIO) + override def cloneType = + new NastiRouterIO(nSlaves).asInstanceOf[this.type] +} + /** Take a single Nasti master and route its requests to various slaves * @param nSlaves the number of slaves * @param routeSel a function which takes an address and produces @@ -388,10 +399,7 @@ class NastiErrorSlave(implicit p: Parameters) extends NastiModule { class NastiRouter(nSlaves: Int, routeSel: UInt => UInt)(implicit p: Parameters) extends NastiModule { - val io = new Bundle { - val master = (new NastiIO).flip - val slave = Vec(nSlaves, new NastiIO) - } + val io = new NastiRouterIO(nSlaves) val ar_route = routeSel(io.master.ar.bits.addr) val aw_route = routeSel(io.master.aw.bits.addr) From 1c8745dfd21d90b0f38399f98be54cb1fc371d56 Mon Sep 17 00:00:00 2001 From: "Wesley W. 
Terpstra" Date: Wed, 25 May 2016 11:01:59 -0700 Subject: [PATCH 090/116] ahb: backport to chisel2 Merges #16 --- junctions/src/main/scala/hasti.scala | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 53637419..12bc86c7 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -459,9 +459,12 @@ class HastiTestSRAM(depth: Int)(implicit p: Parameters) extends HastiModule()(p) // The mask and address during the address phase val a_request = io.hsel && (io.htrans === HTRANS_NONSEQ || io.htrans === HTRANS_SEQ) - val a_mask = mask_shift(hastiDataBytes-1, 0) + val a_mask = Wire(UInt(width = hastiDataBytes)) val a_address = io.haddr >> UInt(hastiAlignment) val a_write = io.hwrite + + // for backwards compatibility with chisel2, we needed a static width in definition + a_mask := mask_shift(hastiDataBytes-1, 0) // The data phase signals val d_read = RegEnable(a_request && !a_write, Bool(false), ready) From 2ece3e61029a3c27a7cdcc8ded7e2d3cb7699c13 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 26 May 2016 01:02:56 -0700 Subject: [PATCH 091/116] Use Mem for ReorderQueue data This might improve FPGA QoR. 
--- junctions/src/main/scala/util.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/junctions/src/main/scala/util.scala b/junctions/src/main/scala/util.scala index 23435548..db454181 100644 --- a/junctions/src/main/scala/util.scala +++ b/junctions/src/main/scala/util.scala @@ -182,7 +182,7 @@ class ReorderQueue[T <: Data](dType: T, tagWidth: Int, size: Int) val deq = new ReorderDequeueIO(dType, tagWidth) } - val roq_data = Reg(Vec(size, dType.cloneType)) + val roq_data = Mem(size, dType.cloneType) val roq_tags = Reg(Vec(size, UInt(width = tagWidth))) val roq_free = Reg(init = Vec.fill(size)(Bool(true))) From a2b9d337b688953b461bffa4f088ad7e4d025a0d Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 26 May 2016 01:03:40 -0700 Subject: [PATCH 092/116] No need for full-throughput queues in NastiErrorSlave --- junctions/src/main/scala/nasti.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 066ab35d..2453e0e3 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -342,7 +342,7 @@ class NastiErrorSlave(implicit p: Parameters) extends NastiModule { when (io.ar.fire()) { printf("Invalid read address %x\n", io.ar.bits.addr) } when (io.aw.fire()) { printf("Invalid write address %x\n", io.aw.bits.addr) } - val r_queue = Module(new Queue(new NastiReadAddressChannel, 2)) + val r_queue = Module(new Queue(new NastiReadAddressChannel, 1)) r_queue.io.enq <> io.ar val responding = Reg(init = Bool(false)) @@ -375,7 +375,7 @@ class NastiErrorSlave(implicit p: Parameters) extends NastiModule { when (io.aw.fire()) { draining := Bool(true) } when (io.w.fire() && io.w.bits.last) { draining := Bool(false) } - val b_queue = Module(new Queue(UInt(width = nastiWIdBits), 2)) + val b_queue = Module(new Queue(UInt(width = nastiWIdBits), 1)) b_queue.io.enq.valid := io.aw.valid && !draining b_queue.io.enq.bits := 
io.aw.bits.id io.aw.ready := b_queue.io.enq.ready && !draining From e036d3a04a7ddb335e89b80017b321dcd7e4efbd Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 26 May 2016 15:59:08 -0700 Subject: [PATCH 093/116] Chisel3: gender issue --- junctions/src/main/scala/hasti.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 12bc86c7..2dd32d64 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -322,8 +322,8 @@ class HastiBus(amap: Seq[UInt=>Bool])(implicit p: Parameters) extends HastiModul } val bar = Module(new HastiXbar(1, amap)) - io.master <> bar.io.masters(0) - io.slaves <> bar.io.slaves + bar.io.masters(0) <> io.master + bar.io.slaves <> io.slaves } class HastiSlaveMux(n: Int)(implicit p: Parameters) extends HastiModule()(p) { From 056d7ec93a7d9f702ee2b1d736f44a38f3da07d7 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Fri, 27 May 2016 12:23:18 -0700 Subject: [PATCH 094/116] Drive hmastlock low in Nasti-Hasti converter --- junctions/src/main/scala/hasti.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 2dd32d64..a193bbd1 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -396,6 +396,7 @@ class HastiMasterIONastiIOConverter(implicit p: Parameters) extends HastiModule( io.hasti.hburst := HBURST_INCR io.hasti.hprot := UInt(0) io.hasti.hwdata := data + io.hasti.hmastlock := Bool(false) io.hasti.htrans := MuxLookup(state, HTRANS_IDLE, Seq( s_write -> Mux(io.nasti.w.valid, Mux(first, HTRANS_NONSEQ, HTRANS_SEQ), From 56897f707ab972c5724d6168836fb8ea28e8489f Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Fri, 27 May 2016 12:23:39 -0700 Subject: [PATCH 095/116] Don't rely on Mux1H output when no inputs are hot --- junctions/src/main/scala/hasti.scala | 7 +++++-- 1 file changed, 
5 insertions(+), 2 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index a193bbd1..550e3a42 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -272,20 +272,23 @@ class HastiXbar(nMasters: Int, addressMap: Seq[UInt=>Bool])(implicit p: Paramete diversions(m).io.divert := bubbleM(m) && NSeq(m) && masters(m).hready } + def dotProduct(g: Seq[Bool], v: Seq[UInt]) = + (g zip v) map { case (gg, ss) => Mux(gg, ss, ss.fromBits(UInt(0))) } reduce (_|_) + // Master muxes (address and data phase are the same) (masters zip (unionGrantMS zip nowhereM)) foreach { case (m, (g, n)) => { // If the master is connected to a slave, the slave determines hready. // However, if no slave is connected, for progress report ready anyway, if: // bad address (swallow request) OR idle (permit stupid slaves to move FSM) val autoready = n || m.isIdle() - m.hready := Mux1H(g, slaves.map(_.hready ^ autoready)) ^ autoready + m.hready := dotProduct(g, slaves.map(_.hready ^ autoready)) ^ autoready m.hrdata := Mux1H(g, slaves.map(_.hrdata)) m.hresp := Mux1H(g, slaves.map(_.hresp)) } } // Slave address phase muxes (slaves zip addressPhaseGrantSM) foreach { case (s, g) => { - s.htrans := Mux1H(g, masters.map(_.htrans)) // defaults to HTRANS_IDLE (0) + s.htrans := dotProduct(g, masters.map(_.htrans)) // defaults to HTRANS_IDLE (0) s.haddr := Mux1H(g, masters.map(_.haddr)) s.hmastlock := isLocked s.hwrite := Mux1H(g, masters.map(_.hwrite)) From d0988902f2b4a8a857ad494ba6ffd80a76254300 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Tue, 31 May 2016 19:47:50 -0700 Subject: [PATCH 096/116] fix NASTI -> HASTI bridge --- junctions/src/main/scala/hasti.scala | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 550e3a42..f735ce4a 100644 --- a/junctions/src/main/scala/hasti.scala +++ 
b/junctions/src/main/scala/hasti.scala @@ -370,6 +370,8 @@ class HastiMasterIONastiIOConverter(implicit p: Parameters) extends HastiModule( require(hastiAddrBits == nastiXAddrBits) require(hastiDataBits == nastiXDataBits) + val r_queue = Module(new Queue(new NastiReadDataChannel, 2)) + val s_idle :: s_read :: s_write :: s_write_resp :: Nil = Enum(Bits(), 4) val state = Reg(init = s_idle) @@ -379,19 +381,26 @@ class HastiMasterIONastiIOConverter(implicit p: Parameters) extends HastiModule( val len = Reg(UInt(width = nastiXLenBits)) val data = Reg(UInt(width = nastiXDataBits)) val first = Reg(init = Bool(false)) - val rvalid = Reg(init = Bool(false)) + val is_rtrans = (state === s_read) && + (io.hasti.htrans === HTRANS_SEQ || + io.hasti.htrans === HTRANS_NONSEQ) + val rvalid = Reg(next = is_rtrans) io.nasti.aw.ready := (state === s_idle) io.nasti.ar.ready := (state === s_idle) && !io.nasti.aw.valid io.nasti.w.ready := (state === s_write) && io.hasti.hready io.nasti.b.valid := (state === s_write_resp) io.nasti.b.bits := NastiWriteResponseChannel(id = id) - io.nasti.r.valid := (state === s_read) && io.hasti.hready && !first - io.nasti.r.bits := NastiReadDataChannel( + io.nasti.r <> r_queue.io.deq + + r_queue.io.enq.valid := io.hasti.hready && rvalid + r_queue.io.enq.bits := NastiReadDataChannel( id = id, data = io.hasti.hrdata, last = (len === UInt(0))) + assert(!r_queue.io.enq.valid || r_queue.io.enq.ready, + "HASTI -> NASTI converter queue overflow") io.hasti.haddr := addr io.hasti.hsize := size @@ -406,7 +415,6 @@ class HastiMasterIONastiIOConverter(implicit p: Parameters) extends HastiModule( Mux(first, HTRANS_IDLE, HTRANS_BUSY)), s_read -> MuxCase(HTRANS_BUSY, Seq( first -> HTRANS_NONSEQ, - (len === UInt(0)) -> HTRANS_IDLE, io.nasti.r.ready -> HTRANS_SEQ)))) when (io.nasti.aw.fire()) { @@ -435,13 +443,9 @@ class HastiMasterIONastiIOConverter(implicit p: Parameters) extends HastiModule( when (io.nasti.b.fire()) { state := s_idle } - when (state === s_read && first) 
{ + when (is_rtrans) { first := Bool(false) addr := addr + (UInt(1) << size) - } - - when (io.nasti.r.fire()) { - addr := addr + (UInt(1) << size) len := len - UInt(1) when (len === UInt(0)) { state := s_idle } } From 53a0e6cb9c3be874fc3711f39f043378fd298b3a Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Wed, 1 Jun 2016 11:35:17 -0700 Subject: [PATCH 097/116] another fix for AXI -> AHB converter --- junctions/src/main/scala/hasti.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index f735ce4a..1c98d067 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -384,7 +384,7 @@ class HastiMasterIONastiIOConverter(implicit p: Parameters) extends HastiModule( val is_rtrans = (state === s_read) && (io.hasti.htrans === HTRANS_SEQ || io.hasti.htrans === HTRANS_NONSEQ) - val rvalid = Reg(next = is_rtrans) + val rvalid = Reg(is_rtrans, io.hasti.hready) io.nasti.aw.ready := (state === s_idle) io.nasti.ar.ready := (state === s_idle) && !io.nasti.aw.valid From 8983b0e865781eecee59a0be5b96ce5fcde7ce3d Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Wed, 1 Jun 2016 15:01:52 -0700 Subject: [PATCH 098/116] hopefully the last fix for AXI -> AHB converter --- junctions/src/main/scala/hasti.scala | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 1c98d067..4bbf785b 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -122,7 +122,8 @@ class MasterDiversion(implicit p: Parameters) extends HastiModule()(p) { } // If the master is diverted, he must also have been told hready - assert (!io.divert || io.in.hready); + assert (!io.divert || io.in.hready, + "Diverted but not ready"); // Replay the request we diverted io.out.htrans := Mux(full, buffer.htrans, io.in.htrans) @@ -370,7 +371,7 @@ class 
HastiMasterIONastiIOConverter(implicit p: Parameters) extends HastiModule( require(hastiAddrBits == nastiXAddrBits) require(hastiDataBits == nastiXDataBits) - val r_queue = Module(new Queue(new NastiReadDataChannel, 2)) + val r_queue = Module(new Queue(new NastiReadDataChannel, 2, pipe = true)) val s_idle :: s_read :: s_write :: s_write_resp :: Nil = Enum(Bits(), 4) val state = Reg(init = s_idle) @@ -384,7 +385,7 @@ class HastiMasterIONastiIOConverter(implicit p: Parameters) extends HastiModule( val is_rtrans = (state === s_read) && (io.hasti.htrans === HTRANS_SEQ || io.hasti.htrans === HTRANS_NONSEQ) - val rvalid = Reg(is_rtrans, io.hasti.hready) + val rvalid = RegEnable(is_rtrans, Bool(false), io.hasti.hready) io.nasti.aw.ready := (state === s_idle) io.nasti.ar.ready := (state === s_idle) && !io.nasti.aw.valid @@ -415,7 +416,7 @@ class HastiMasterIONastiIOConverter(implicit p: Parameters) extends HastiModule( Mux(first, HTRANS_IDLE, HTRANS_BUSY)), s_read -> MuxCase(HTRANS_BUSY, Seq( first -> HTRANS_NONSEQ, - io.nasti.r.ready -> HTRANS_SEQ)))) + (r_queue.io.count <= UInt(1)) -> HTRANS_SEQ)))) when (io.nasti.aw.fire()) { first := Bool(true) @@ -443,7 +444,7 @@ class HastiMasterIONastiIOConverter(implicit p: Parameters) extends HastiModule( when (io.nasti.b.fire()) { state := s_idle } - when (is_rtrans) { + when (is_rtrans && io.hasti.hready) { first := Bool(false) addr := addr + (UInt(1) << size) len := len - UInt(1) @@ -463,7 +464,9 @@ class HastiTestSRAM(depth: Int)(implicit p: Parameters) extends HastiModule()(p) val mask_shift = mask_wide.toBits().asUInt() << io.haddr(hastiAlignment-1,0) // The request had better have been aligned! 
(AHB-lite requires this) - assert ((io.haddr & mask_decode.toBits()(hastiAlignment,1).asUInt) === UInt(0)) + assert (io.htrans === HTRANS_IDLE || io.htrans === HTRANS_BUSY || + (io.haddr & mask_decode.toBits()(hastiAlignment,1).asUInt) === UInt(0), + "HASTI request not aligned") // The mask and address during the address phase val a_request = io.hsel && (io.htrans === HTRANS_NONSEQ || io.htrans === HTRANS_SEQ) From 695be2f0ae8b9a021f8770776af901791d9624a5 Mon Sep 17 00:00:00 2001 From: "Wesley W. Terpstra" Date: Wed, 1 Jun 2016 10:47:02 -0700 Subject: [PATCH 099/116] hasti: work-around unsupported 0-width signals --- junctions/src/main/scala/hasti.scala | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 4bbf785b..e120be9c 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -54,7 +54,7 @@ trait HasHastiParameters { val hastiAddrBits = hastiParams.addrBits val hastiDataBits = hastiParams.dataBits val hastiDataBytes = hastiDataBits/8 - val hastiAlignment = log2Up(hastiDataBytes) + val hastiAlignment = log2Ceil(hastiDataBytes) } abstract class HastiModule(implicit val p: Parameters) extends Module @@ -461,12 +461,15 @@ class HastiTestSRAM(depth: Int)(implicit p: Parameters) extends HastiModule()(p) // Calculate the bitmask of which bytes are being accessed val mask_decode = Vec.tabulate(hastiAlignment+1) (UInt(_) <= io.hsize) val mask_wide = Vec.tabulate(hastiDataBytes) { i => mask_decode(log2Up(i+1)) } - val mask_shift = mask_wide.toBits().asUInt() << io.haddr(hastiAlignment-1,0) + val mask_shift = if (hastiAlignment == 0) UInt(1) else + mask_wide.toBits().asUInt() << io.haddr(hastiAlignment-1,0) // The request had better have been aligned! 
(AHB-lite requires this) - assert (io.htrans === HTRANS_IDLE || io.htrans === HTRANS_BUSY || - (io.haddr & mask_decode.toBits()(hastiAlignment,1).asUInt) === UInt(0), - "HASTI request not aligned") + if (hastiAlignment >= 1) { + assert (io.htrans === HTRANS_IDLE || io.htrans === HTRANS_BUSY || + (io.haddr & mask_decode.toBits()(hastiAlignment,1).asUInt) === UInt(0), + "HASTI request not aligned") + } // The mask and address during the address phase val a_request = io.hsel && (io.htrans === HTRANS_NONSEQ || io.htrans === HTRANS_SEQ) From 28161cab45af9dba1aa336a09b65e3efb3704ce7 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Fri, 3 Jun 2016 13:46:53 -0700 Subject: [PATCH 100/116] Merge AddrHashMap and AddrMap --- junctions/src/main/scala/addrmap.scala | 131 ++++++++++++------------- junctions/src/main/scala/nasti.scala | 32 +++--- 2 files changed, 74 insertions(+), 89 deletions(-) diff --git a/junctions/src/main/scala/addrmap.scala b/junctions/src/main/scala/addrmap.scala index 8eb6ddd7..a223f63a 100644 --- a/junctions/src/main/scala/addrmap.scala +++ b/junctions/src/main/scala/addrmap.scala @@ -16,7 +16,6 @@ case object PPNBits extends Field[Int] case object VPNBits extends Field[Int] case object GlobalAddrMap extends Field[AddrMap] -case object GlobalAddrHashMap extends Field[AddrHashMap] trait HasAddrMapParameters { implicit val p: Parameters @@ -30,23 +29,27 @@ trait HasAddrMapParameters { val pgLevelBits = p(PgLevelBits) val asIdBits = p(ASIdBits) - val addrMap = p(GlobalAddrHashMap) + val addrMap = p(GlobalAddrMap) } case class MemAttr(prot: Int, cacheable: Boolean = false) -abstract class MemRegion { - def align: BigInt +sealed abstract class MemRegion { + def start: BigInt def size: BigInt def numSlaves: Int + def attr: MemAttr + + def containsAddress(x: UInt) = UInt(start) <= x && x < UInt(start + size) } -case class MemSize(size: BigInt, align: BigInt, attr: MemAttr) extends MemRegion { +case class MemSize(size: BigInt, attr: MemAttr) extends 
MemRegion { + def start = 0 def numSlaves = 1 } -case class MemSubmap(size: BigInt, entries: AddrMap) extends MemRegion { - val numSlaves = entries.countSlaves - val align = entries.computeAlign + +case class MemRange(start: BigInt, size: BigInt, attr: MemAttr) extends MemRegion { + def numSlaves = 1 } object AddrMapProt { @@ -67,90 +70,80 @@ class AddrMapProt extends Bundle { case class AddrMapEntry(name: String, region: MemRegion) -case class AddrHashMapEntry(port: Int, start: BigInt, region: MemRegion) - -class AddrMap(entries: Seq[AddrMapEntry]) extends scala.collection.IndexedSeq[AddrMapEntry] { - private val hash = HashMap(entries.map(e => (e.name, e.region)):_*) - - def apply(index: Int): AddrMapEntry = entries(index) - - def length: Int = entries.size - - def countSlaves: Int = entries.map(_.region.numSlaves).foldLeft(0)(_ + _) - - def computeSize: BigInt = new AddrHashMap(this).size - - def computeAlign: BigInt = entries.map(_.region.align).foldLeft(BigInt(1))(_ max _) - - override def tail: AddrMap = new AddrMap(entries.tail) -} - object AddrMap { def apply(elems: AddrMapEntry*): AddrMap = new AddrMap(elems) } -class AddrHashMap(addrmap: AddrMap, start: BigInt = BigInt(0)) { - private val mapping = HashMap[String, AddrHashMapEntry]() - private val subMaps = HashMap[String, AddrHashMapEntry]() +class AddrMap(entriesIn: Seq[AddrMapEntry], val start: BigInt = BigInt(0)) extends MemRegion { + def isEmpty = entries.isEmpty + def length = entries.size + def numSlaves = entries.map(_.region.numSlaves).foldLeft(0)(_ + _) + def attr = ??? 
- private def genPairs(am: AddrMap, start: BigInt, startIdx: Int, prefix: String): (BigInt, Int) = { - var ind = startIdx + private val slavePorts = HashMap[String, Int]() + private val mapping = HashMap[String, MemRegion]() + + val (size: BigInt, entries: Seq[AddrMapEntry]) = { + var ind = 0 var base = start - am.foreach { ame => - val name = prefix + ame.name - base = (base + ame.region.align - 1) / ame.region.align * ame.region.align - ame.region match { - case r: MemSize => - mapping += name -> AddrHashMapEntry(ind, base, r) - base += r.size - ind += 1 - case r: MemSubmap => - subMaps += name -> AddrHashMapEntry(-1, base, r) - ind = genPairs(r.entries, base, ind, name + ":")._2 - base += r.size - }} - (base, ind) - } + var rebasedEntries = collection.mutable.ArrayBuffer[AddrMapEntry]() + for (AddrMapEntry(name, r) <- entriesIn) { + if (r.start != 0) { + val align = BigInt(1) << log2Ceil(r.size) + require(r.start >= base, s"region $name base address 0x${r.start.toString(16)} overlaps previous base 0x${base.toString(16)}") + require(r.start % align == 0, s"region $name base address 0x${r.start.toString(16)} not aligned to 0x${align.toString(16)}") + base = r.start + } else { + base = (base + r.size - 1) / r.size * r.size + } - val size = genPairs(addrmap, start, 0, "")._1 + r match { + case r: AddrMap => + val subMap = new AddrMap(r.entries, base) + rebasedEntries += AddrMapEntry(name, subMap) + mapping += name -> subMap + mapping ++= subMap.mapping.map { case (k, v) => s"$name:$k" -> v } + slavePorts ++= subMap.slavePorts.map { case (k, v) => s"$name:$k" -> (ind + v) } + case _ => + val e = MemRange(base, r.size, r.attr) + rebasedEntries += AddrMapEntry(name, e) + mapping += name -> e + slavePorts += name -> ind + } - val sortedEntries: Seq[(String, BigInt, MemSize)] = { - val arr = new Array[(String, BigInt, MemSize)](mapping.size) - mapping.foreach { case (name, AddrHashMapEntry(port, base, region)) => - arr(port) = (name, base, region.asInstanceOf[MemSize]) + 
ind += r.numSlaves + base += r.size } - arr.toSeq + (base - start, rebasedEntries) } - def nEntries: Int = mapping.size - def apply(name: String): AddrHashMapEntry = mapping.getOrElse(name, subMaps(name)) - def subMap(name: String): (BigInt, AddrMap) = { - val m = subMaps(name) - (m.start, m.region.asInstanceOf[MemSubmap].entries) + val flatten: Seq[(String, MemRange)] = { + val arr = new Array[(String, MemRange)](slavePorts.size) + for ((name, port) <- slavePorts) + arr(port) = (name, mapping(name).asInstanceOf[MemRange]) + arr } - def isInRegion(name: String, addr: UInt): Bool = { - val start = mapping(name).start - val size = mapping(name).region.size - UInt(start) <= addr && addr < UInt(start + size) - } + def apply(name: String): MemRegion = mapping(name) + def port(name: String): Int = slavePorts(name) + def subMap(name: String): AddrMap = mapping(name).asInstanceOf[AddrMap] + def isInRegion(name: String, addr: UInt): Bool = mapping(name).containsAddress(addr) def isCacheable(addr: UInt): Bool = { - sortedEntries.filter(_._3.attr.cacheable).map { case (_, base, region) => - UInt(base) <= addr && addr < UInt(base + region.size) + flatten.filter(_._2.attr.cacheable).map { case (_, region) => + region.containsAddress(addr) }.foldLeft(Bool(false))(_ || _) } def isValid(addr: UInt): Bool = { - sortedEntries.map { case (_, base, region) => - addr >= UInt(base) && addr < UInt(base + region.size) + flatten.map { case (_, region) => + region.containsAddress(addr) }.foldLeft(Bool(false))(_ || _) } def getProt(addr: UInt): AddrMapProt = { - val protForRegion = sortedEntries.map { case (_, base, region) => - val inRegion = addr >= UInt(base) && addr < UInt(base + region.size) - Mux(inRegion, UInt(region.attr.prot, AddrMapProt.SZ), UInt(0)) + val protForRegion = flatten.map { case (_, region) => + Mux(region.containsAddress(addr), UInt(region.attr.prot, AddrMapProt.SZ), UInt(0)) } new AddrMapProt().fromBits(protForRegion.reduce(_|_)) } diff --git 
a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 2453e0e3..f82073bb 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -506,37 +506,29 @@ abstract class NastiInterconnect(implicit p: Parameters) extends NastiModule()(p lazy val io = new NastiInterconnectIO(nMasters, nSlaves) } -class NastiRecursiveInterconnect( - val nMasters: Int, val nSlaves: Int, - addrmap: AddrMap, base: BigInt) +class NastiRecursiveInterconnect(val nMasters: Int, addrMap: AddrMap) (implicit p: Parameters) extends NastiInterconnect()(p) { - val levelSize = addrmap.size + def port(name: String) = io.slaves(addrMap.port(name)) + val nSlaves = addrMap.numSlaves + val routeSel = (addr: UInt) => + Cat(addrMap.entries.map(e => addrMap(e.name).containsAddress(addr)).reverse) - val addrHashMap = new AddrHashMap(addrmap, base) - val routeSel = (addr: UInt) => { - Cat(addrmap.map { case entry => - val hashEntry = addrHashMap(entry.name) - addr >= UInt(hashEntry.start) && addr < UInt(hashEntry.start + hashEntry.region.size) - }.reverse) - } - - val xbar = Module(new NastiCrossbar(nMasters, levelSize, routeSel)) + val xbar = Module(new NastiCrossbar(nMasters, addrMap.length, routeSel)) xbar.io.masters <> io.masters - io.slaves <> addrmap.zip(xbar.io.slaves).flatMap { + io.slaves <> addrMap.entries.zip(xbar.io.slaves).flatMap { case (entry, xbarSlave) => { entry.region match { - case _: MemSize => - Some(xbarSlave) - case MemSubmap(_, submap) if submap.isEmpty => + case submap: AddrMap if submap.entries.isEmpty => val err_slave = Module(new NastiErrorSlave) err_slave.io <> xbarSlave None - case MemSubmap(_, submap) => - val subSlaves = submap.countSlaves - val ic = Module(new NastiRecursiveInterconnect(1, subSlaves, submap, addrHashMap(entry.name).start)) + case submap: AddrMap => + val ic = Module(new NastiRecursiveInterconnect(1, submap)) ic.io.masters.head <> xbarSlave ic.io.slaves + case r: MemRange => + Some(xbarSlave) } } 
} From 636a46c05288b418a6289f0139df680cfc2f7e5e Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Wed, 8 Jun 2016 10:02:21 -0700 Subject: [PATCH 101/116] make sure SlowIO clock divider is initialized on reset --- junctions/src/main/scala/slowio.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/junctions/src/main/scala/slowio.scala b/junctions/src/main/scala/slowio.scala index 7e926918..6711ea82 100644 --- a/junctions/src/main/scala/slowio.scala +++ b/junctions/src/main/scala/slowio.scala @@ -26,8 +26,8 @@ class SlowIO[T <: Data](val divisor_max: Int)(data: => T) extends Module } io.divisor := (hold << 16) | divisor - val count = Reg{UInt(width = log2Up(divisor_max))} - val myclock = Reg{Bool()} + val count = Reg(init = UInt(0, log2Up(divisor_max))) + val myclock = Reg(init = Bool(false)) count := count + UInt(1) val rising = count === (divisor >> 1) From 0969be880477a3daa21df9a7290a1393c6f3463b Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Wed, 8 Jun 2016 13:45:30 -0700 Subject: [PATCH 102/116] Revert "make sure SlowIO clock divider is initialized on reset" This reverts commit 546aaad8cfb03e45e068733c2b694232bcf9dcdb. --- junctions/src/main/scala/slowio.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/junctions/src/main/scala/slowio.scala b/junctions/src/main/scala/slowio.scala index 6711ea82..7e926918 100644 --- a/junctions/src/main/scala/slowio.scala +++ b/junctions/src/main/scala/slowio.scala @@ -26,8 +26,8 @@ class SlowIO[T <: Data](val divisor_max: Int)(data: => T) extends Module } io.divisor := (hold << 16) | divisor - val count = Reg(init = UInt(0, log2Up(divisor_max))) - val myclock = Reg(init = Bool(false)) + val count = Reg{UInt(width = log2Up(divisor_max))} + val myclock = Reg{Bool()} count := count + UInt(1) val rising = count === (divisor >> 1) From 3393d4362b988fa92e2332ea9016d1738141d15f Mon Sep 17 00:00:00 2001 From: "Wesley W. 
Terpstra" Date: Wed, 8 Jun 2016 13:25:11 -0700 Subject: [PATCH 103/116] hasti: fix test SRAM depth --- junctions/src/main/scala/hasti.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index e120be9c..991e3da9 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -474,7 +474,7 @@ class HastiTestSRAM(depth: Int)(implicit p: Parameters) extends HastiModule()(p) // The mask and address during the address phase val a_request = io.hsel && (io.htrans === HTRANS_NONSEQ || io.htrans === HTRANS_SEQ) val a_mask = Wire(UInt(width = hastiDataBytes)) - val a_address = io.haddr >> UInt(hastiAlignment) + val a_address = io.haddr(depth-1, hastiAlignment) val a_write = io.hwrite // for backwards compatibility with chisel2, we needed a static width in definition @@ -507,7 +507,7 @@ class HastiTestSRAM(depth: Int)(implicit p: Parameters) extends HastiModule()(p) val p_wdata = holdUnless(d_wdata, p_latch_d) // Use single-ported memory with byte-write enable - val mem = SeqMem(depth, Vec(hastiDataBytes, Bits(width = 8))) + val mem = SeqMem(1 << (depth-hastiAlignment), Vec(hastiDataBytes, Bits(width = 8))) // Decide is the SRAM port is used for reading or (potentially) writing val read = ready && a_request && !a_write From ad4e4f19be9ab5752cc3a9659763bb5e7d951ed8 Mon Sep 17 00:00:00 2001 From: "Wesley W. Terpstra" Date: Wed, 8 Jun 2016 13:39:22 -0700 Subject: [PATCH 104/116] Revert "Don't rely on Mux1H output when no inputs are hot" This reverts commit b912b7cd1263d7f3b63e6fcb052d9d7493d1b970. 
--- junctions/src/main/scala/hasti.scala | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 991e3da9..127bd769 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -273,23 +273,20 @@ class HastiXbar(nMasters: Int, addressMap: Seq[UInt=>Bool])(implicit p: Paramete diversions(m).io.divert := bubbleM(m) && NSeq(m) && masters(m).hready } - def dotProduct(g: Seq[Bool], v: Seq[UInt]) = - (g zip v) map { case (gg, ss) => Mux(gg, ss, ss.fromBits(UInt(0))) } reduce (_|_) - // Master muxes (address and data phase are the same) (masters zip (unionGrantMS zip nowhereM)) foreach { case (m, (g, n)) => { // If the master is connected to a slave, the slave determines hready. // However, if no slave is connected, for progress report ready anyway, if: // bad address (swallow request) OR idle (permit stupid slaves to move FSM) val autoready = n || m.isIdle() - m.hready := dotProduct(g, slaves.map(_.hready ^ autoready)) ^ autoready + m.hready := Mux1H(g, slaves.map(_.hready ^ autoready)) ^ autoready m.hrdata := Mux1H(g, slaves.map(_.hrdata)) m.hresp := Mux1H(g, slaves.map(_.hresp)) } } // Slave address phase muxes (slaves zip addressPhaseGrantSM) foreach { case (s, g) => { - s.htrans := dotProduct(g, masters.map(_.htrans)) // defaults to HTRANS_IDLE (0) + s.htrans := Mux1H(g, masters.map(_.htrans)) // defaults to HTRANS_IDLE (0) s.haddr := Mux1H(g, masters.map(_.haddr)) s.hmastlock := isLocked s.hwrite := Mux1H(g, masters.map(_.hwrite)) From ed9fcea7f8df1827aa7b154948817a44d8d88c12 Mon Sep 17 00:00:00 2001 From: "Wesley W. 
Terpstra" Date: Wed, 8 Jun 2016 14:35:22 -0700 Subject: [PATCH 105/116] hasti: correct fix to locking --- junctions/src/main/scala/hasti.scala | 29 ++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 127bd769..12873eff 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -252,14 +252,18 @@ class HastiXbar(nMasters: Int, addressMap: Seq[UInt=>Bool])(implicit p: Paramete Vec.tabulate(nSlaves) { s => dataPhaseGrantSM(s)(m) && !matchMS(m)(s) } .reduce(_ || _) } + // Block any request that requires bus ownership or conflicts with isLocked + val blockedM = + Vec((lockedM zip masters) map { case(l, m) => !l && (isLocked || m.hmastlock) }) + // Requested access to slaves from masters (pre-arbitration) - // NOTE: quash any request that requires bus ownership or conflicts with isLocked // NOTE: isNSeq does NOT include SEQ; thus, masters who are midburst do not // request access to a new slave. They stay tied to the old and do not get two. // NOTE: if a master was waited, it must repeat the same request as last cycle; // thus, it will request the same slave and not end up with two (unless buggy). 
- val NSeq = Vec((lockedM zip masters) map { case(l, m) => m.isNSeq() && ((!isLocked && !m.hmastlock) || l) }) - val requestSM = Vec.tabulate(nSlaves) { s => Vec.tabulate(nMasters) { m => matchMS(m)(s) && NSeq(m) && !bubbleM(m) } } + val NSeq = masters.map(_.isNSeq()) + val requestSM = Vec.tabulate(nSlaves) { s => Vec.tabulate(nMasters) { m => + matchMS(m)(s) && NSeq(m) && !bubbleM(m) && !blockedM(m) } } // Select at most one master request per slave (lowest index = highest priority) val selectedRequestSM = Vec(requestSM map { m => Vec(PriorityEncoderOH(m)) }) @@ -268,25 +272,26 @@ class HastiXbar(nMasters: Int, addressMap: Seq[UInt=>Bool])(implicit p: Paramete addressPhaseGrantSM := Vec((holdS zip (priorAddressPhaseGrantSM zip selectedRequestSM)) map { case (h, (p, r)) => Mux(h, p, r) }) - // If we diverted a master, we need to absorb his address phase to replay later for (m <- 0 until nMasters) { - diversions(m).io.divert := bubbleM(m) && NSeq(m) && masters(m).hready + // If the master is connected to a slave, the slave determines hready. + // However, if no slave is connected, for progress report ready anyway, if: + // bad address (swallow request) OR idle (permit stupid masters to move FSM) + val autoready = nowhereM(m) || masters(m).isIdle() + val hready = Mux1H(unionGrantMS(m), slaves.map(_.hready ^ autoready)) ^ autoready + masters(m).hready := hready + // If we diverted a master, we need to absorb his address phase to replay later + diversions(m).io.divert := (bubbleM(m) || blockedM(m)) && NSeq(m) && hready } // Master muxes (address and data phase are the same) - (masters zip (unionGrantMS zip nowhereM)) foreach { case (m, (g, n)) => { - // If the master is connected to a slave, the slave determines hready. 
- // However, if no slave is connected, for progress report ready anyway, if: - // bad address (swallow request) OR idle (permit stupid slaves to move FSM) - val autoready = n || m.isIdle() - m.hready := Mux1H(g, slaves.map(_.hready ^ autoready)) ^ autoready + (masters zip unionGrantMS) foreach { case (m, g) => { m.hrdata := Mux1H(g, slaves.map(_.hrdata)) m.hresp := Mux1H(g, slaves.map(_.hresp)) } } // Slave address phase muxes (slaves zip addressPhaseGrantSM) foreach { case (s, g) => { - s.htrans := Mux1H(g, masters.map(_.htrans)) // defaults to HTRANS_IDLE (0) + s.htrans := Mux1H(g, masters.map(_.htrans)) s.haddr := Mux1H(g, masters.map(_.haddr)) s.hmastlock := isLocked s.hwrite := Mux1H(g, masters.map(_.hwrite)) From 96f09003f2fd216e91563aaa4360f0cb108af27f Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Tue, 5 Jul 2016 16:03:25 -0700 Subject: [PATCH 106/116] use options for NastiWriteDataChannel write mask --- junctions/src/main/scala/atos.scala | 2 +- junctions/src/main/scala/nasti.scala | 11 +++-------- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/junctions/src/main/scala/atos.scala b/junctions/src/main/scala/atos.scala index ee78c853..970a1f5c 100644 --- a/junctions/src/main/scala/atos.scala +++ b/junctions/src/main/scala/atos.scala @@ -246,7 +246,7 @@ class AtosRequestDecoder(implicit p: Parameters) extends AtosModule()(p) { io.w.bits := NastiWriteDataChannel( id = io.req.bits.id(), data = io.req.bits.data(), - strb = io.req.bits.strb(), + strb = Some(io.req.bits.strb()), last = io.req.bits.last()) io.req.ready := (io.ar.ready && is_ar) || diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index f82073bb..564b08f5 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -168,21 +168,16 @@ object NastiReadAddressChannel { } object NastiWriteDataChannel { - def apply(data: UInt, last: Bool = Bool(true), id: UInt = UInt(0)) + def apply(data: UInt, strb: Option[UInt] = 
None, + last: Bool = Bool(true), id: UInt = UInt(0)) (implicit p: Parameters): NastiWriteDataChannel = { val w = Wire(new NastiWriteDataChannel) - w.strb := Fill(w.nastiWStrobeBits, UInt(1, 1)) + w.strb := strb.getOrElse(Fill(w.nastiWStrobeBits, UInt(1, 1))) w.data := data w.last := last w.user := UInt(0) w } - def apply(data: UInt, strb: UInt, last: Bool, id: UInt) - (implicit p: Parameters): NastiWriteDataChannel = { - val w = apply(data, last, id) - w.strb := strb - w - } } object NastiReadDataChannel { From bbf780725f15753886ebcc66f10fa2a1ccaa0973 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Wed, 6 Jul 2016 15:13:04 -0700 Subject: [PATCH 107/116] add NastiReadIO and NastiWriteIO bundles --- junctions/src/main/scala/nasti.scala | 11 +++++ junctions/src/main/scala/smi.scala | 70 +++++++++++++--------------- 2 files changed, 43 insertions(+), 38 deletions(-) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 564b08f5..0e9b6a59 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -71,6 +71,17 @@ trait HasNastiData extends HasNastiParameters { val last = Bool() } +class NastiReadIO(implicit val p: Parameters) extends ParameterizedBundle()(p) { + val ar = Decoupled(new NastiReadAddressChannel) + val r = Decoupled(new NastiReadDataChannel).flip +} + +class NastiWriteIO(implicit val p: Parameters) extends ParameterizedBundle()(p) { + val aw = Decoupled(new NastiWriteAddressChannel) + val w = Decoupled(new NastiWriteDataChannel) + val b = Decoupled(new NastiWriteResponseChannel).flip +} + class NastiIO(implicit val p: Parameters) extends ParameterizedBundle()(p) { val aw = Decoupled(new NastiWriteAddressChannel) val w = Decoupled(new NastiWriteDataChannel) diff --git a/junctions/src/main/scala/smi.scala b/junctions/src/main/scala/smi.scala index 3832788e..b804e104 100644 --- a/junctions/src/main/scala/smi.scala +++ b/junctions/src/main/scala/smi.scala @@ -91,8 +91,7 @@ class 
SmiArbiter(val n: Int, val dataWidth: Int, val addrWidth: Int) class SmiIONastiReadIOConverter(val dataWidth: Int, val addrWidth: Int) (implicit p: Parameters) extends NastiModule()(p) { val io = new Bundle { - val ar = Decoupled(new NastiReadAddressChannel).flip - val r = Decoupled(new NastiReadDataChannel) + val nasti = new NastiReadIO().flip val smi = new SmiIO(dataWidth, addrWidth) } @@ -118,7 +117,7 @@ class SmiIONastiReadIOConverter(val dataWidth: Int, val addrWidth: Int) val buffer = Reg(init = Vec.fill(maxWordsPerBeat) { Bits(0, dataWidth) }) - io.ar.ready := (state === s_idle) + io.nasti.ar.ready := (state === s_idle) io.smi.req.valid := (state === s_read) && !sendDone io.smi.req.bits.rw := Bool(false) @@ -126,22 +125,22 @@ class SmiIONastiReadIOConverter(val dataWidth: Int, val addrWidth: Int) io.smi.resp.ready := (state === s_read) - io.r.valid := (state === s_resp) - io.r.bits := NastiReadDataChannel( + io.nasti.r.valid := (state === s_resp) + io.nasti.r.bits := NastiReadDataChannel( id = id, data = buffer.toBits, last = (nBeats === UInt(0))) - when (io.ar.fire()) { - when (io.ar.bits.size < UInt(byteOffBits)) { + when (io.nasti.ar.fire()) { + when (io.nasti.ar.bits.size < UInt(byteOffBits)) { nWords := UInt(0) } .otherwise { - nWords := calcWordCount(io.ar.bits.size) + nWords := calcWordCount(io.nasti.ar.bits.size) } - nBeats := io.ar.bits.len - addr := io.ar.bits.addr(addrOffBits - 1, byteOffBits) - recvInd := io.ar.bits.addr(wordCountBits + byteOffBits - 1, byteOffBits) - id := io.ar.bits.id + nBeats := io.nasti.ar.bits.len + addr := io.nasti.ar.bits.addr(addrOffBits - 1, byteOffBits) + recvInd := io.nasti.ar.bits.addr(wordCountBits + byteOffBits - 1, byteOffBits) + id := io.nasti.ar.bits.id state := s_read } @@ -157,22 +156,20 @@ class SmiIONastiReadIOConverter(val dataWidth: Int, val addrWidth: Int) when (nWords === UInt(0)) { state := s_resp } } - when (io.r.fire()) { + when (io.nasti.r.fire()) { recvInd := UInt(0) sendDone := Bool(false) // clear 
all the registers in the buffer buffer.foreach(_ := Bits(0)) nBeats := nBeats - UInt(1) - state := Mux(io.r.bits.last, s_idle, s_read) + state := Mux(io.nasti.r.bits.last, s_idle, s_read) } } class SmiIONastiWriteIOConverter(val dataWidth: Int, val addrWidth: Int) (implicit p: Parameters) extends NastiModule()(p) { val io = new Bundle { - val aw = Decoupled(new NastiWriteAddressChannel).flip - val w = Decoupled(new NastiWriteDataChannel).flip - val b = Decoupled(new NastiWriteResponseChannel) + val nasti = new NastiWriteIO().flip val smi = new SmiIO(dataWidth, addrWidth) } @@ -182,7 +179,7 @@ class SmiIONastiWriteIOConverter(val dataWidth: Int, val addrWidth: Int) private val addrOffBits = addrWidth + byteOffBits private val nastiByteOffBits = log2Ceil(nastiXDataBits / 8) - assert(!io.aw.valid || io.aw.bits.size >= UInt(byteOffBits), + assert(!io.nasti.aw.valid || io.nasti.aw.bits.size >= UInt(byteOffBits), "Nasti size must be >= Smi size") val id = Reg(UInt(width = nastiWIdBits)) @@ -203,39 +200,39 @@ class SmiIONastiWriteIOConverter(val dataWidth: Int, val addrWidth: Int) val s_idle :: s_data :: s_send :: s_ack :: s_resp :: Nil = Enum(Bits(), 5) val state = Reg(init = s_idle) - io.aw.ready := (state === s_idle) - io.w.ready := (state === s_data) + io.nasti.aw.ready := (state === s_idle) + io.nasti.w.ready := (state === s_data) io.smi.req.valid := (state === s_send) && strb(0) io.smi.req.bits.rw := Bool(true) io.smi.req.bits.addr := addr io.smi.req.bits.data := data(dataWidth - 1, 0) io.smi.resp.ready := (state === s_ack) - io.b.valid := (state === s_resp) - io.b.bits := NastiWriteResponseChannel(id) + io.nasti.b.valid := (state === s_resp) + io.nasti.b.bits := NastiWriteResponseChannel(id) val jump = if (maxWordsPerBeat > 1) PriorityMux(strb(maxWordsPerBeat - 1, 1), (1 until maxWordsPerBeat).map(UInt(_))) else UInt(1) - when (io.aw.fire()) { + when (io.nasti.aw.fire()) { if (dataWidth == nastiXDataBits) { - addr := io.aw.bits.addr(addrOffBits - 1, byteOffBits) + 
addr := io.nasti.aw.bits.addr(addrOffBits - 1, byteOffBits) } else { - addr := Cat(io.aw.bits.addr(addrOffBits - 1, nastiByteOffBits), + addr := Cat(io.nasti.aw.bits.addr(addrOffBits - 1, nastiByteOffBits), UInt(0, nastiByteOffBits - byteOffBits)) } - offset := io.aw.bits.addr(nastiByteOffBits - 1, 0) - id := io.aw.bits.id - size := io.aw.bits.size + offset := io.nasti.aw.bits.addr(nastiByteOffBits - 1, 0) + id := io.nasti.aw.bits.id + size := io.nasti.aw.bits.size last := Bool(false) state := s_data } - when (io.w.fire()) { - last := io.w.bits.last - strb := makeStrobe(offset, size, io.w.bits.strb) - data := io.w.bits.data + when (io.nasti.w.fire()) { + last := io.nasti.w.bits.last + strb := makeStrobe(offset, size, io.nasti.w.bits.strb) + data := io.nasti.w.bits.data state := s_send } @@ -251,7 +248,7 @@ class SmiIONastiWriteIOConverter(val dataWidth: Int, val addrWidth: Int) when (io.smi.resp.fire()) { state := s_resp } - when (io.b.fire()) { state := s_idle } + when (io.nasti.b.fire()) { state := s_idle } } /** Convert Nasti protocol to Smi protocol */ @@ -267,13 +264,10 @@ class SmiIONastiIOConverter(val dataWidth: Int, val addrWidth: Int) "SMI data width must be less than or equal to NASTI data width") val reader = Module(new SmiIONastiReadIOConverter(dataWidth, addrWidth)) - reader.io.ar <> io.nasti.ar - io.nasti.r <> reader.io.r + reader.io.nasti <> io.nasti val writer = Module(new SmiIONastiWriteIOConverter(dataWidth, addrWidth)) - writer.io.aw <> io.nasti.aw - writer.io.w <> io.nasti.w - io.nasti.b <> writer.io.b + writer.io.nasti <> io.nasti val arb = Module(new SmiArbiter(2, dataWidth, addrWidth)) arb.io.in(0) <> reader.io.smi From 60554825135f0dfa34e6fccf7b912eb935b4a016 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Thu, 7 Jul 2016 12:12:39 -0700 Subject: [PATCH 108/116] make sure write channel id is actually set --- junctions/src/main/scala/nasti.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/junctions/src/main/scala/nasti.scala 
b/junctions/src/main/scala/nasti.scala index 0e9b6a59..49248119 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -186,6 +186,7 @@ object NastiWriteDataChannel { w.strb := strb.getOrElse(Fill(w.nastiWStrobeBits, UInt(1, 1))) w.data := data w.last := last + w.id := id w.user := UInt(0) w } From 2f70136f905fcfa5b651eb1061a91e0bd39bc50a Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Wed, 15 Jun 2016 21:54:59 -0700 Subject: [PATCH 109/116] Fix the Nasti to Smi Converter for single-word Nasti busses There's a register that tracks what word within a Nasti transaction a Smi response corresponds to, since Smi itself doesn't have any multi-word stuff. This breaks the single-word Nasti to Smi converter due to what's essentially a 0-width wire bug: it ends up doing something like word_offset_into_nasti := nasti_address(3, 3) when "word_offset_into_nasti" should really be a 0-bit register, but due to some log2Up block size calculation logic it's actually a 1-bit register. Thus, this expression ends up grabbing a bit of the address, which causes odd addresses to get buffered incorrectly. My fix is to just special-case the "Nasti bus width is the same as Smi bus width" case.
--- junctions/src/main/scala/smi.scala | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/junctions/src/main/scala/smi.scala b/junctions/src/main/scala/smi.scala index b804e104..ce1fbe70 100644 --- a/junctions/src/main/scala/smi.scala +++ b/junctions/src/main/scala/smi.scala @@ -139,7 +139,10 @@ class SmiIONastiReadIOConverter(val dataWidth: Int, val addrWidth: Int) } nBeats := io.nasti.ar.bits.len addr := io.nasti.ar.bits.addr(addrOffBits - 1, byteOffBits) - recvInd := io.nasti.ar.bits.addr(wordCountBits + byteOffBits - 1, byteOffBits) + if (maxWordsPerBeat > 1) + recvInd := io.nasti.ar.bits.addr(wordCountBits + byteOffBits - 1, byteOffBits) + else + recvInd := UInt(0) id := io.nasti.ar.bits.id state := s_read } From c0dc09b3a1f81d50c6d93f7fa8bb25d1007808b3 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Wed, 13 Jul 2016 11:08:15 -0700 Subject: [PATCH 110/116] don't use CAM in ReorderQueue if not necessary --- junctions/src/main/scala/util.scala | 59 ++++++++++++++++++++--------- 1 file changed, 41 insertions(+), 18 deletions(-) diff --git a/junctions/src/main/scala/util.scala b/junctions/src/main/scala/util.scala index db454181..f30f1c65 100644 --- a/junctions/src/main/scala/util.scala +++ b/junctions/src/main/scala/util.scala @@ -175,34 +175,57 @@ class ReorderDequeueIO[T <: Data](dType: T, tagWidth: Int) extends Bundle { new ReorderDequeueIO(dType, tagWidth).asInstanceOf[this.type] } -class ReorderQueue[T <: Data](dType: T, tagWidth: Int, size: Int) +class ReorderQueue[T <: Data](dType: T, tagWidth: Int, size: Option[Int] = None) extends Module { val io = new Bundle { val enq = new ReorderEnqueueIO(dType, tagWidth).flip val deq = new ReorderDequeueIO(dType, tagWidth) } - val roq_data = Mem(size, dType.cloneType) - val roq_tags = Reg(Vec(size, UInt(width = tagWidth))) - val roq_free = Reg(init = Vec.fill(size)(Bool(true))) + val tagSpaceSize = 1 << tagWidth + val actualSize = size.getOrElse(tagSpaceSize) - val roq_enq_addr = 
PriorityEncoder(roq_free) - val roq_matches = roq_tags.zip(roq_free) - .map { case (tag, free) => tag === io.deq.tag && !free } - val roq_deq_addr = PriorityEncoder(roq_matches) + if (tagSpaceSize > actualSize) { + val roq_data = Mem(actualSize, dType) + val roq_tags = Reg(Vec(actualSize, UInt(width = tagWidth))) + val roq_free = Reg(init = Vec.fill(actualSize)(Bool(true))) - io.enq.ready := roq_free.reduce(_ || _) - io.deq.data := roq_data(roq_deq_addr) - io.deq.matches := roq_matches.reduce(_ || _) + val roq_enq_addr = PriorityEncoder(roq_free) + val roq_matches = roq_tags.zip(roq_free) + .map { case (tag, free) => tag === io.deq.tag && !free } + val roq_deq_addr = PriorityEncoder(roq_matches) - when (io.enq.valid && io.enq.ready) { - roq_data(roq_enq_addr) := io.enq.bits.data - roq_tags(roq_enq_addr) := io.enq.bits.tag - roq_free(roq_enq_addr) := Bool(false) - } + io.enq.ready := roq_free.reduce(_ || _) + io.deq.data := roq_data(roq_deq_addr) + io.deq.matches := roq_matches.reduce(_ || _) - when (io.deq.valid) { - roq_free(roq_deq_addr) := Bool(true) + when (io.enq.valid && io.enq.ready) { + roq_data(roq_enq_addr) := io.enq.bits.data + roq_tags(roq_enq_addr) := io.enq.bits.tag + roq_free(roq_enq_addr) := Bool(false) + } + + when (io.deq.valid) { + roq_free(roq_deq_addr) := Bool(true) + } + + println("Warning: inferring a CAM for ReorderQueue") + } else { + val roq_data = Mem(tagSpaceSize, dType) + val roq_free = Reg(init = Vec.fill(tagSpaceSize)(Bool(true))) + + io.enq.ready := roq_free(io.enq.bits.tag) + io.deq.data := roq_data(io.deq.tag) + io.deq.matches := !roq_free(io.deq.tag) + + when (io.enq.valid && io.enq.ready) { + roq_data(io.enq.bits.tag) := io.enq.bits.data + roq_free(io.enq.bits.tag) := Bool(false) + } + + when (io.deq.valid) { + roq_free(io.deq.tag) := Bool(true) + } } } From 37fd11870c1b8a48f0c26623de3744f42304e721 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Wed, 13 Jul 2016 12:11:43 -0700 Subject: [PATCH 111/116] fix up ReorderQueue CAM --- 
junctions/src/main/scala/util.scala | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/junctions/src/main/scala/util.scala b/junctions/src/main/scala/util.scala index f30f1c65..f7a19ec8 100644 --- a/junctions/src/main/scala/util.scala +++ b/junctions/src/main/scala/util.scala @@ -186,17 +186,17 @@ class ReorderQueue[T <: Data](dType: T, tagWidth: Int, size: Option[Int] = None) val actualSize = size.getOrElse(tagSpaceSize) if (tagSpaceSize > actualSize) { - val roq_data = Mem(actualSize, dType) + val roq_data = Reg(Vec(actualSize, dType)) val roq_tags = Reg(Vec(actualSize, UInt(width = tagWidth))) val roq_free = Reg(init = Vec.fill(actualSize)(Bool(true))) val roq_enq_addr = PriorityEncoder(roq_free) val roq_matches = roq_tags.zip(roq_free) .map { case (tag, free) => tag === io.deq.tag && !free } - val roq_deq_addr = PriorityEncoder(roq_matches) + val roq_deq_onehot = PriorityEncoderOH(roq_matches) io.enq.ready := roq_free.reduce(_ || _) - io.deq.data := roq_data(roq_deq_addr) + io.deq.data := Mux1H(roq_deq_onehot, roq_data) io.deq.matches := roq_matches.reduce(_ || _) when (io.enq.valid && io.enq.ready) { @@ -206,10 +206,10 @@ class ReorderQueue[T <: Data](dType: T, tagWidth: Int, size: Option[Int] = None) } when (io.deq.valid) { - roq_free(roq_deq_addr) := Bool(true) + roq_free(OHToUInt(roq_deq_onehot)) := Bool(true) } - println("Warning: inferring a CAM for ReorderQueue") + println(s"Warning - using a CAM for ReorderQueue, tagBits: ${tagWidth} size: ${actualSize}") } else { val roq_data = Mem(tagSpaceSize, dType) val roq_free = Reg(init = Vec.fill(tagSpaceSize)(Bool(true))) From c33c0944be8e8cd8a2b2485ec68f797dd92d645b Mon Sep 17 00:00:00 2001 From: "Wesley W. 
Terpstra" Date: Tue, 12 Jul 2016 19:41:10 -0700 Subject: [PATCH 112/116] crossing: first clock crossing, the handshaker --- junctions/src/main/scala/crossing.scala | 150 ++++++++++++++++++++++++ 1 file changed, 150 insertions(+) create mode 100644 junctions/src/main/scala/crossing.scala diff --git a/junctions/src/main/scala/crossing.scala b/junctions/src/main/scala/crossing.scala new file mode 100644 index 00000000..a4f974c0 --- /dev/null +++ b/junctions/src/main/scala/crossing.scala @@ -0,0 +1,150 @@ +package junctions +import Chisel._ + +class Crossing[T <: Data](gen: T, enq_sync: Boolean, deq_sync: Boolean) extends Bundle { + val enq = Decoupled(gen.cloneType).flip() + val deq = Decoupled(gen.cloneType) + val enq_clock = if (enq_sync) Some(Clock(INPUT)) else None + val deq_clock = if (deq_sync) Some(Clock(INPUT)) else None + val enq_reset = if (enq_sync) Some(Bool(INPUT)) else None + val deq_reset = if (deq_sync) Some(Bool(INPUT)) else None +} + +// Output is 1 for one cycle after any edge of 'in' +object AsyncHandshakePulse { + def apply(in: Bool, sync: Int): Bool = { + val syncv = RegInit(Vec.fill(sync+1){Bool(false)}) + syncv.last := in + (syncv.init zip syncv.tail).foreach { case (sink, source) => sink := source } + syncv(0) =/= syncv(1) + } +} + +class AsyncHandshakeSource[T <: Data](gen: T, sync: Int, clock: Clock, reset: Bool) + extends Module(_clock = clock, _reset = reset) { + val io = new Bundle { + // These come from the source clock domain + val enq = Decoupled(gen.cloneType).flip() + // These cross to the sink clock domain + val bits = gen.cloneType + val push = Bool(OUTPUT) + val pop = Bool(INPUT) + } + + val ready = RegInit(Bool(true)) + val bits = Reg(gen.cloneType) + val push = RegInit(Bool(false)) + + io.enq.ready := ready + io.bits := bits + io.push := push + + val pop = AsyncHandshakePulse(io.pop, sync) + assert (!pop || !ready) + + when (pop) { + ready := Bool(true) + } + + when (io.enq.fire()) { + ready := Bool(false) + bits := io.enq.bits 
+ push := !push + } +} + +class AsyncHandshakeSink[T <: Data](gen: T, sync: Int, clock: Clock, reset: Bool) + extends Module(_clock = clock, _reset = reset) { + val io = new Bundle { + // These cross to the source clock domain + val bits = gen.cloneType.flip() + val push = Bool(INPUT) + val pop = Bool(OUTPUT) + // These go to the sink clock domain + val deq = Decoupled(gen.cloneType) + } + + val valid = RegInit(Bool(false)) + val bits = Reg(gen.cloneType) + val pop = RegInit(Bool(false)) + + io.deq.valid := valid + io.deq.bits := bits + io.pop := pop + + val push = AsyncHandshakePulse(io.push, sync) + assert (!push || !valid) + + when (push) { + valid := Bool(true) + bits := io.bits + } + + when (io.deq.fire()) { + valid := Bool(false) + pop := !pop + } +} + +class AsyncHandshake[T <: Data](gen: T, sync: Int = 2) extends Module { + val io = new Crossing(gen, true, true) + require (sync >= 2) + + val source = Module(new AsyncHandshakeSource(gen, sync, io.enq_clock.get, io.enq_reset.get)) + val sink = Module(new AsyncHandshakeSink (gen, sync, io.deq_clock.get, io.deq_reset.get)) + + source.io.enq <> io.enq + io.deq <> sink.io.deq + + sink.io.bits := source.io.bits + sink.io.push := source.io.push + source.io.pop := sink.io.pop +} + +class AsyncDecoupledTo[T <: Data](gen: T, depth: Int = 0, sync: Int = 2) extends Module { + val io = new Crossing(gen, false, true) + + // !!! 
if depth == 0 { use Handshake } else { use AsyncFIFO } + val crossing = Module(new AsyncHandshake(gen, sync)).io + crossing.enq_clock.get := clock + crossing.enq_reset.get := reset + crossing.enq <> io.enq + crossing.deq_clock.get := io.deq_clock.get + crossing.deq_reset.get := io.deq_reset.get + io.deq <> crossing.deq +} + +object AsyncDecoupledTo { + // source is in our clock domain, output is in the 'to' clock domain + def apply[T <: Data](to_clock: Clock, to_reset: Bool, source: DecoupledIO[T], depth: Int = 0, sync: Int = 2): DecoupledIO[T] = { + val to = Module(new AsyncDecoupledTo(source.bits, depth, sync)) + to.io.deq_clock.get := to_clock + to.io.deq_reset.get := to_reset + to.io.enq <> source + to.io.deq + } +} + +class AsyncDecoupledFrom[T <: Data](gen: T, depth: Int = 0, sync: Int = 2) extends Module { + val io = new Crossing(gen, true, false) + + // !!! if depth == 0 { use Handshake } else { use AsyncFIFO } + val crossing = Module(new AsyncHandshake(gen, sync)).io + crossing.enq_clock.get := io.enq_clock.get + crossing.enq_reset.get := io.enq_reset.get + crossing.enq <> io.enq + crossing.deq_clock.get := clock + crossing.deq_reset.get := reset + io.deq <> crossing.deq +} + +object AsyncDecoupledFrom { + // source is in the 'from' clock domain, output is in our clock domain + def apply[T <: Data](from_clock: Clock, from_reset: Bool, source: DecoupledIO[T], depth: Int = 0, sync: Int = 2): DecoupledIO[T] = { + val from = Module(new AsyncDecoupledFrom(source.bits, depth, sync)) + from.io.enq_clock.get := from_clock + from.io.enq_reset.get := from_reset + from.io.enq <> source + from.io.deq + } +} From eeae74e3fc30eca40b13e45d55651f2dadf38835 Mon Sep 17 00:00:00 2001 From: "Wesley W. 
Terpstra" Date: Tue, 12 Jul 2016 19:41:40 -0700 Subject: [PATCH 113/116] nasti: include convenient clock crossing helpers --- junctions/src/main/scala/nasti.scala | 30 ++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/junctions/src/main/scala/nasti.scala b/junctions/src/main/scala/nasti.scala index 49248119..41251ac3 100644 --- a/junctions/src/main/scala/nasti.scala +++ b/junctions/src/main/scala/nasti.scala @@ -705,3 +705,33 @@ class NastiMemoryDemux(nRoutes: Int)(implicit p: Parameters) extends NastiModule connectRespChannel(i, io.master.b, slave.b) } } + +object AsyncNastiTo { + // source(master) is in our clock domain, output is in the 'to' clock domain + def apply[T <: Data](to_clock: Clock, to_reset: Bool, source: NastiIO, depth: Int = 3, sync: Int = 2)(implicit p: Parameters): NastiIO = { + val sink = Wire(new NastiIO) + + sink.aw <> AsyncDecoupledTo(to_clock, to_reset, source.aw, depth, sync) + sink.ar <> AsyncDecoupledTo(to_clock, to_reset, source.ar, depth, sync) + sink.w <> AsyncDecoupledTo(to_clock, to_reset, source.w, depth, sync) + source.b <> AsyncDecoupledFrom(to_clock, to_reset, sink.b, depth, sync) + source.r <> AsyncDecoupledFrom(to_clock, to_reset, sink.r, depth, sync) + + sink + } +} + +object AsyncNastiFrom { + // source(master) is in the 'from' clock domain, output is in our clock domain + def apply[T <: Data](from_clock: Clock, from_reset: Bool, source: NastiIO, depth: Int = 3, sync: Int = 2)(implicit p: Parameters): NastiIO = { + val sink = Wire(new NastiIO) + + sink.aw <> AsyncDecoupledFrom(from_clock, from_reset, source.aw, depth, sync) + sink.ar <> AsyncDecoupledFrom(from_clock, from_reset, source.ar, depth, sync) + sink.w <> AsyncDecoupledFrom(from_clock, from_reset, source.w, depth, sync) + source.b <> AsyncDecoupledTo(from_clock, from_reset, sink.b, depth, sync) + source.r <> AsyncDecoupledTo(from_clock, from_reset, sink.r, depth, sync) + + sink + } +} From 66b9c5ad058dda823b0039d1a01989e37b8e69e3 Mon Sep 17 
00:00:00 2001 From: Howard Mao Date: Wed, 13 Jul 2016 14:31:19 -0700 Subject: [PATCH 114/116] fix up cloneType calls in clock crossers --- junctions/src/main/scala/crossing.scala | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/junctions/src/main/scala/crossing.scala b/junctions/src/main/scala/crossing.scala index a4f974c0..0f71362e 100644 --- a/junctions/src/main/scala/crossing.scala +++ b/junctions/src/main/scala/crossing.scala @@ -2,8 +2,8 @@ package junctions import Chisel._ class Crossing[T <: Data](gen: T, enq_sync: Boolean, deq_sync: Boolean) extends Bundle { - val enq = Decoupled(gen.cloneType).flip() - val deq = Decoupled(gen.cloneType) + val enq = Decoupled(gen).flip() + val deq = Decoupled(gen) val enq_clock = if (enq_sync) Some(Clock(INPUT)) else None val deq_clock = if (deq_sync) Some(Clock(INPUT)) else None val enq_reset = if (enq_sync) Some(Bool(INPUT)) else None @@ -24,15 +24,15 @@ class AsyncHandshakeSource[T <: Data](gen: T, sync: Int, clock: Clock, reset: Bo extends Module(_clock = clock, _reset = reset) { val io = new Bundle { // These come from the source clock domain - val enq = Decoupled(gen.cloneType).flip() + val enq = Decoupled(gen).flip() // These cross to the sink clock domain - val bits = gen.cloneType + val bits = gen.cloneType.asOutput val push = Bool(OUTPUT) val pop = Bool(INPUT) } val ready = RegInit(Bool(true)) - val bits = Reg(gen.cloneType) + val bits = Reg(gen) val push = RegInit(Bool(false)) io.enq.ready := ready @@ -57,15 +57,15 @@ class AsyncHandshakeSink[T <: Data](gen: T, sync: Int, clock: Clock, reset: Bool extends Module(_clock = clock, _reset = reset) { val io = new Bundle { // These cross to the source clock domain - val bits = gen.cloneType.flip() + val bits = gen.cloneType.asInput val push = Bool(INPUT) val pop = Bool(OUTPUT) // These go to the sink clock domain - val deq = Decoupled(gen.cloneType) + val deq = Decoupled(gen) } val valid = RegInit(Bool(false)) - val bits = 
Reg(gen.cloneType) + val bits = Reg(gen) val pop = RegInit(Bool(false)) io.deq.valid := valid From 897e6ccf8a4057df2e5262c9768e05d0e5eede63 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Fri, 15 Jul 2016 15:39:00 -0700 Subject: [PATCH 115/116] fix Hasti and Smi converters --- junctions/src/main/scala/hasti.scala | 8 ++++++-- junctions/src/main/scala/smi.scala | 10 ++++++---- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index 12873eff..fe177500 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -403,7 +403,11 @@ class HastiMasterIONastiIOConverter(implicit p: Parameters) extends HastiModule( last = (len === UInt(0))) assert(!r_queue.io.enq.valid || r_queue.io.enq.ready, - "HASTI -> NASTI converter queue overflow") + "NASTI -> HASTI converter queue overflow") + + val next_count = r_queue.io.count + + r_queue.io.enq.valid - + r_queue.io.deq.ready io.hasti.haddr := addr io.hasti.hsize := size @@ -418,7 +422,7 @@ class HastiMasterIONastiIOConverter(implicit p: Parameters) extends HastiModule( Mux(first, HTRANS_IDLE, HTRANS_BUSY)), s_read -> MuxCase(HTRANS_BUSY, Seq( first -> HTRANS_NONSEQ, - (r_queue.io.count <= UInt(1)) -> HTRANS_SEQ)))) + (next_count <= UInt(1)) -> HTRANS_SEQ)))) when (io.nasti.aw.fire()) { first := Bool(true) diff --git a/junctions/src/main/scala/smi.scala b/junctions/src/main/scala/smi.scala index ce1fbe70..777dd075 100644 --- a/junctions/src/main/scala/smi.scala +++ b/junctions/src/main/scala/smi.scala @@ -240,16 +240,18 @@ class SmiIONastiWriteIOConverter(val dataWidth: Int, val addrWidth: Int) } when (state === s_send) { - when (strb === UInt(0)) { - state := Mux(last, s_ack, s_data) - } .elsewhen (io.smi.req.ready || !strb(0)) { + when (io.smi.req.ready || !strb(0)) { strb := strb >> jump data := data >> Cat(jump, UInt(0, log2Up(dataWidth))) addr := addr + jump + when (strb(0)) { state := s_ack } } } - when 
(io.smi.resp.fire()) { state := s_resp } + when (io.smi.resp.fire()) { + state := Mux(strb === UInt(0), + Mux(last, s_resp, s_data), s_send) + } when (io.nasti.b.fire()) { state := s_idle } } From 59d700bf66c2952076333d6746d5542669c2acf8 Mon Sep 17 00:00:00 2001 From: Howard Mao Date: Fri, 15 Jul 2016 18:45:37 -0700 Subject: [PATCH 116/116] fix combinational loop in NASTI -> HASTI converter --- junctions/src/main/scala/hasti.scala | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/junctions/src/main/scala/hasti.scala b/junctions/src/main/scala/hasti.scala index fe177500..abb3ee81 100644 --- a/junctions/src/main/scala/hasti.scala +++ b/junctions/src/main/scala/hasti.scala @@ -405,9 +405,8 @@ class HastiMasterIONastiIOConverter(implicit p: Parameters) extends HastiModule( assert(!r_queue.io.enq.valid || r_queue.io.enq.ready, "NASTI -> HASTI converter queue overflow") - val next_count = r_queue.io.count + - r_queue.io.enq.valid - - r_queue.io.deq.ready + // How many read requests have we not delivered a response for yet? + val pending_count = r_queue.io.count + rvalid io.hasti.haddr := addr io.hasti.hsize := size @@ -422,7 +421,7 @@ class HastiMasterIONastiIOConverter(implicit p: Parameters) extends HastiModule( Mux(first, HTRANS_IDLE, HTRANS_BUSY)), s_read -> MuxCase(HTRANS_BUSY, Seq( first -> HTRANS_NONSEQ, - (next_count <= UInt(1)) -> HTRANS_SEQ)))) + (pending_count <= UInt(1)) -> HTRANS_SEQ)))) when (io.nasti.aw.fire()) { first := Bool(true)