moved buses to junctions repo

Henry Cook
2015-07-29 18:04:30 -07:00
parent 8b1ab23347
commit c70b495f6d
6 changed files with 423 additions and 1007 deletions

@@ -2,6 +2,7 @@
package uncore
import Chisel._
import junctions._
import scala.math.max
/** Parameters exposed to the top-level design, set based on
@@ -1237,3 +1238,422 @@ trait HasDataBeatCounters {
(cnt > UInt(0), up_idx, up_done, down_idx, down_done)
}
}
class NASTIMasterIOTileLinkIOConverter extends TLModule with NASTIParameters {
val io = new Bundle {
val tl = new ManagerTileLinkIO
val nasti = new NASTIMasterIO
}
val dataBits = tlDataBits*tlDataBeats
val dstIdBits = params(LNHeaderBits)
require(tlDataBits == nastiXDataBits, "Data sizes between LLC and MC don't agree") // TODO: remove this restriction
require(tlDataBeats < (1 << nastiXLenBits), "Can't have that many beats")
require(dstIdBits + tlClientXactIdBits < nastiXIdBits, "NASTIMasterIO converter is going to truncate tags: " + dstIdBits + " + " + tlClientXactIdBits + " >= " + nastiXIdBits)
io.tl.acquire.ready := Bool(false)
io.tl.probe.valid := Bool(false)
io.tl.release.ready := Bool(false)
io.tl.finish.ready := Bool(true)
io.nasti.b.ready := Bool(false)
io.nasti.r.ready := Bool(false)
io.nasti.ar.valid := Bool(false)
io.nasti.aw.valid := Bool(false)
io.nasti.w.valid := Bool(false)
val dst_off = dstIdBits + tlClientXactIdBits
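// NASTI transaction ids are built as Cat(client_id, client_xact_id, type bit):
// bit 0 is isVoluntary() for Releases or isBuiltInType() for Acquires, bits
// (tlClientXactIdBits, 1) carry the client transaction id, and bits
// (dst_off, tlClientXactIdBits + 1) carry the destination client id.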
val acq_has_data = io.tl.acquire.bits.hasData()
val rel_has_data = io.tl.release.bits.hasData()
val is_write = io.tl.release.valid || (io.tl.acquire.valid && acq_has_data)
// Decompose outgoing TL Acquires into NASTI address and data channels
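// One outgoing transaction is tracked at a time: active_out is set while a
// TL Acquire/Release is being turned into a NASTI AW/AR command plus W beats,
// cmd_sent_out records that the address channel has been accepted, and
// tl_cnt_out counts write data beats until tl_wrap_out.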
val active_out = Reg(init=Bool(false))
val cmd_sent_out = Reg(init=Bool(false))
val tag_out = Reg(UInt(width = nastiXIdBits))
val addr_out = Reg(UInt(width = nastiXAddrBits))
val has_data = Reg(init=Bool(false))
val data_from_rel = Reg(init=Bool(false))
val (tl_cnt_out, tl_wrap_out) =
Counter((io.tl.acquire.fire() && acq_has_data) ||
(io.tl.release.fire() && rel_has_data), tlDataBeats)
val tl_done_out = Reg(init=Bool(false))
io.nasti.ar.bits.id := tag_out
io.nasti.ar.bits.addr := addr_out
io.nasti.ar.bits.len := Mux(has_data, UInt(tlDataBeats-1), UInt(0))
io.nasti.ar.bits.size := UInt(log2Ceil(tlDataBits/8))
io.nasti.ar.bits.burst := UInt("b01")
io.nasti.ar.bits.lock := Bool(false)
io.nasti.ar.bits.cache := UInt("b0000")
io.nasti.ar.bits.prot := UInt("b000")
io.nasti.ar.bits.qos := UInt("b0000")
io.nasti.ar.bits.region := UInt("b0000")
io.nasti.ar.bits.user := UInt(0)
io.nasti.aw.bits := io.nasti.ar.bits
io.nasti.w.bits.strb := Mux(data_from_rel, SInt(-1), io.tl.acquire.bits.wmask())
io.nasti.w.bits.data := Mux(data_from_rel, io.tl.release.bits.data, io.tl.acquire.bits.data)
io.nasti.w.bits.last := tl_wrap_out
when(!active_out){
io.tl.release.ready := io.nasti.w.ready
io.tl.acquire.ready := io.nasti.w.ready && !io.tl.release.valid
io.nasti.w.valid := (io.tl.release.valid && rel_has_data) ||
(io.tl.acquire.valid && acq_has_data)
when(io.nasti.w.ready && (io.tl.release.valid || io.tl.acquire.valid)) {
active_out := (!is_write && !io.nasti.ar.ready) ||
(is_write && !(io.nasti.aw.ready && io.nasti.w.ready)) ||
(io.nasti.w.valid && Bool(tlDataBeats > 1))
io.nasti.aw.valid := is_write
io.nasti.ar.valid := !is_write
cmd_sent_out := (!is_write && io.nasti.ar.ready) || (is_write && io.nasti.aw.ready)
tl_done_out := tl_wrap_out
when(io.tl.release.valid) {
data_from_rel := Bool(true)
io.nasti.w.bits.data := io.tl.release.bits.data
io.nasti.w.bits.strb := SInt(-1)
val tag = Cat(io.tl.release.bits.client_id,
io.tl.release.bits.client_xact_id,
io.tl.release.bits.isVoluntary())
val addr = io.tl.release.bits.full_addr()
io.nasti.aw.bits.id := tag
io.nasti.aw.bits.addr := addr
io.nasti.aw.bits.len := UInt(tlDataBeats-1)
io.nasti.aw.bits.size := MT_Q
tag_out := tag
addr_out := addr
has_data := rel_has_data
} .elsewhen(io.tl.acquire.valid) {
data_from_rel := Bool(false)
io.nasti.w.bits.data := io.tl.acquire.bits.data
io.nasti.w.bits.strb := io.tl.acquire.bits.wmask()
val tag = Cat(io.tl.acquire.bits.client_id,
io.tl.acquire.bits.client_xact_id,
io.tl.acquire.bits.isBuiltInType())
val addr = io.tl.acquire.bits.full_addr()
when(is_write) {
io.nasti.aw.bits.id := tag
io.nasti.aw.bits.addr := addr
io.nasti.aw.bits.len := Mux(io.tl.acquire.bits.isBuiltInType(Acquire.putBlockType),
UInt(tlDataBeats-1), UInt(0))
io.nasti.aw.bits.size := bytesToXSize(PopCount(io.tl.acquire.bits.wmask()))
} .otherwise {
io.nasti.ar.bits.id := tag
io.nasti.ar.bits.addr := addr
io.nasti.ar.bits.len := Mux(io.tl.acquire.bits.isBuiltInType(Acquire.getBlockType),
UInt(tlDataBeats-1), UInt(0))
io.nasti.ar.bits.size := io.tl.acquire.bits.op_size()
}
tag_out := tag
addr_out := addr
has_data := acq_has_data
}
}
}
when(active_out) {
io.nasti.ar.valid := !cmd_sent_out && !has_data
io.nasti.aw.valid := !cmd_sent_out && has_data
cmd_sent_out := cmd_sent_out || io.nasti.ar.fire() || io.nasti.aw.fire()
when(has_data && !tl_done_out) {
when(data_from_rel) {
io.tl.release.ready := io.nasti.w.ready
io.nasti.w.valid := io.tl.release.valid
} .otherwise {
io.tl.acquire.ready := io.nasti.w.ready
io.nasti.w.valid := io.tl.acquire.valid
}
}
when(tl_wrap_out) { tl_done_out := Bool(true) }
when(cmd_sent_out && (!has_data || tl_done_out)) { active_out := Bool(false) }
}
// Aggregate incoming NASTI responses into TL Grants
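// Read data returning on R becomes data Grants (one Grant beat per R beat,
// with addr_beat taken from tl_cnt_in); write responses on B become put or
// voluntary-release acks, selected by bit 0 of the returning id. A two-input
// arbiter merges both Grant streams onto io.tl.grant.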
val (tl_cnt_in, tl_wrap_in) = Counter(io.tl.grant.fire() && io.tl.grant.bits.hasMultibeatData(), tlDataBeats)
val gnt_arb = Module(new Arbiter(new GrantToDst, 2))
io.tl.grant <> gnt_arb.io.out
gnt_arb.io.in(0).valid := io.nasti.r.valid
io.nasti.r.ready := gnt_arb.io.in(0).ready
gnt_arb.io.in(0).bits := Grant(
dst = (if(dstIdBits > 0) io.nasti.r.bits.id(dst_off, tlClientXactIdBits + 1) else UInt(0)),
is_builtin_type = io.nasti.r.bits.id(0),
g_type = Mux(io.nasti.r.bits.id(0), Grant.getDataBlockType, UInt(0)), // TODO: Assumes MI or MEI protocol
client_xact_id = io.nasti.r.bits.id >> UInt(1),
manager_xact_id = UInt(0),
addr_beat = tl_cnt_in,
data = io.nasti.r.bits.data)
gnt_arb.io.in(1).valid := io.nasti.b.valid
io.nasti.b.ready := gnt_arb.io.in(1).ready
gnt_arb.io.in(1).bits := Grant(
dst = (if(dstIdBits > 0) io.nasti.b.bits.id(dst_off, tlClientXactIdBits + 1) else UInt(0)),
is_builtin_type = Bool(true),
g_type = Mux(io.nasti.b.bits.id(0), Grant.voluntaryAckType, Grant.putAckType),
client_xact_id = io.nasti.b.bits.id >> UInt(1),
manager_xact_id = UInt(0))
}
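// Example use (illustrative sketch; `outerManagerTL` and `io.mem_nasti` are
// assumed names, not defined in this file):
//   val conv = Module(new NASTIMasterIOTileLinkIOConverter)
//   conv.io.tl <> outerManagerTL   // hypothetical ManagerTileLinkIO port
//   io.mem_nasti <> conv.io.nasti  // hypothetical NASTIMasterIO port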
class MemPipeIOTileLinkIOConverter(outstanding: Int) extends MIFModule {
val io = new Bundle {
val tl = new ManagerTileLinkIO
val mem = new MemPipeIO
}
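// Chains MemIOTileLinkIOConverter with a MemPipeIOMemIOConverter (which gets
// the `outstanding` request budget): req_cmd passes through a depth-2 queue
// and req_data through a one-block (mifDataBeats-entry) queue on the way to
// the pipelined memory port.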
val a = Module(new MemIOTileLinkIOConverter(1))
val b = Module(new MemPipeIOMemIOConverter(outstanding))
a.io.tl <> io.tl
b.io.cpu.req_cmd <> Queue(a.io.mem.req_cmd, 2, pipe=true)
b.io.cpu.req_data <> Queue(a.io.mem.req_data, mifDataBeats, pipe=true)
a.io.mem.resp <> b.io.cpu.resp
b.io.mem <> io.mem
}
// Adapter between a ManagerTileLinkIO and a mem controller MemIO
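// When the TL and MIF beat widths or counts differ, a whole block is buffered
// and re-sliced between the two beat sizes; when they match, the cmd and data
// channels are flowed through without data buffers.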
class MemIOTileLinkIOConverter(qDepth: Int) extends TLModule with MIFParameters {
val io = new Bundle {
val tl = new ManagerTileLinkIO
val mem = new MemIO
}
val dataBits = tlDataBits*tlDataBeats
val dstIdBits = params(LNHeaderBits)
require(tlDataBits*tlDataBeats == mifDataBits*mifDataBeats, "Data sizes between LLC and MC don't agree")
require(dstIdBits + tlClientXactIdBits < mifTagBits, "MemIO converter is going to truncate tags: " + dstIdBits + " + " + tlClientXactIdBits + " >= " + mifTagBits)
io.tl.acquire.ready := Bool(false)
io.tl.probe.valid := Bool(false)
io.tl.release.ready := Bool(false)
io.tl.finish.ready := Bool(true)
io.mem.resp.ready := Bool(false)
val gnt_arb = Module(new Arbiter(new GrantToDst, 2))
io.tl.grant <> gnt_arb.io.out
val dst_off = dstIdBits + tlClientXactIdBits
val acq_has_data = io.tl.acquire.bits.hasData()
val rel_has_data = io.tl.release.bits.hasData()
// Decompose outgoing TL Acquires into MemIO cmd and data
val active_out = Reg(init=Bool(false))
val cmd_sent_out = Reg(init=Bool(false))
val tag_out = Reg(UInt(width = mifTagBits))
val addr_out = Reg(UInt(width = mifAddrBits))
val has_data = Reg(init=Bool(false))
val data_from_rel = Reg(init=Bool(false))
val (tl_cnt_out, tl_wrap_out) =
Counter((io.tl.acquire.fire() && acq_has_data) ||
(io.tl.release.fire() && rel_has_data), tlDataBeats)
val tl_done_out = Reg(init=Bool(false))
val make_grant_ack = Reg(init=Bool(false))
gnt_arb.io.in(1).valid := Bool(false)
gnt_arb.io.in(1).bits := Grant(
dst = (if(dstIdBits > 0) tag_out(dst_off, tlClientXactIdBits + 1) else UInt(0)),
is_builtin_type = Bool(true),
g_type = Mux(data_from_rel, Grant.voluntaryAckType, Grant.putAckType),
client_xact_id = tag_out >> UInt(1),
manager_xact_id = UInt(0))
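// Put and voluntary-release acks are generated locally (make_grant_ack) once
// the write data has gone out, rather than from io.mem.resp, which is only
// used for read data below.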
if(tlDataBits != mifDataBits || tlDataBeats != mifDataBeats) {
val mem_cmd_q = Module(new Queue(new MemReqCmd, qDepth))
val mem_data_q = Module(new Queue(new MemData, qDepth))
mem_cmd_q.io.enq.valid := Bool(false)
mem_data_q.io.enq.valid := Bool(false)
val (mif_cnt_out, mif_wrap_out) = Counter(mem_data_q.io.enq.fire(), mifDataBeats)
val mif_done_out = Reg(init=Bool(false))
val tl_buf_out = Reg(Vec(io.tl.acquire.bits.data, tlDataBeats))
val mif_buf_out = Vec.fill(mifDataBeats){ new MemData }
mif_buf_out := mif_buf_out.fromBits(tl_buf_out.toBits)
val mif_prog_out = (mif_cnt_out+UInt(1, width = log2Up(mifDataBeats+1)))*UInt(mifDataBits)
val tl_prog_out = tl_cnt_out*UInt(tlDataBits)
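// mif_prog_out and tl_prog_out measure progress in bits: a MIF data beat is
// only enqueued once the TL side has captured at least that many bits, or
// once the whole TL block has arrived (tl_done_out).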
when(!active_out){
io.tl.release.ready := Bool(true)
io.tl.acquire.ready := !io.tl.release.valid
when(io.tl.release.valid) {
active_out := Bool(true)
cmd_sent_out := Bool(false)
tag_out := Cat(io.tl.release.bits.client_id,
io.tl.release.bits.client_xact_id,
io.tl.release.bits.isVoluntary())
addr_out := io.tl.release.bits.addr_block
has_data := rel_has_data
data_from_rel := Bool(true)
make_grant_ack := io.tl.release.bits.requiresAck()
tl_done_out := tl_wrap_out
tl_buf_out(tl_cnt_out) := io.tl.release.bits.data
} .elsewhen(io.tl.acquire.valid) {
active_out := Bool(true)
cmd_sent_out := Bool(false)
tag_out := Cat(io.tl.acquire.bits.client_id,
io.tl.acquire.bits.client_xact_id,
io.tl.acquire.bits.isBuiltInType())
addr_out := io.tl.acquire.bits.addr_block
has_data := acq_has_data
data_from_rel := Bool(false)
make_grant_ack := acq_has_data
tl_done_out := tl_wrap_out
tl_buf_out(tl_cnt_out) := io.tl.acquire.bits.data
}
}
when(active_out) {
mem_cmd_q.io.enq.valid := !cmd_sent_out
cmd_sent_out := cmd_sent_out || mem_cmd_q.io.enq.fire()
when(has_data) {
when(!tl_done_out) {
io.tl.acquire.ready := Bool(true)
when(io.tl.acquire.valid) {
tl_buf_out(tl_cnt_out) := Mux(data_from_rel,
io.tl.release.bits.data,
io.tl.acquire.bits.data)
}
}
when(!mif_done_out) {
mem_data_q.io.enq.valid := tl_done_out || mif_prog_out <= tl_prog_out
}
}
when(tl_wrap_out) { tl_done_out := Bool(true) }
when(mif_wrap_out) { mif_done_out := Bool(true) }
when(tl_done_out && make_grant_ack) {
gnt_arb.io.in(1).valid := Bool(true)
when(gnt_arb.io.in(1).ready) { make_grant_ack := Bool(false) }
}
when(cmd_sent_out && (!has_data || mif_done_out) && !make_grant_ack) {
active_out := Bool(false)
}
}
mem_cmd_q.io.enq.bits.rw := has_data
mem_cmd_q.io.enq.bits.tag := tag_out
mem_cmd_q.io.enq.bits.addr := addr_out
mem_data_q.io.enq.bits.data := mif_buf_out(mif_cnt_out).data
io.mem.req_cmd <> mem_cmd_q.io.deq
io.mem.req_data <> mem_data_q.io.deq
} else { // Don't make the data buffers and try to flow cmd and data
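// Widths match, so req_cmd/req_data are driven directly from the incoming
// Acquire/Release; the handshake mirrors the buffered path above, but data
// beats pass straight from the TL channels to req_data.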
io.mem.req_cmd.valid := Bool(false)
io.mem.req_data.valid := Bool(false)
io.mem.req_cmd.bits.rw := has_data
io.mem.req_cmd.bits.tag := tag_out
io.mem.req_cmd.bits.addr := addr_out
io.mem.req_data.bits.data := Mux(data_from_rel,
io.tl.release.bits.data,
io.tl.acquire.bits.data)
when(!active_out){
io.tl.release.ready := io.mem.req_data.ready
io.tl.acquire.ready := io.mem.req_data.ready && !io.tl.release.valid
io.mem.req_data.valid := (io.tl.release.valid && rel_has_data) ||
(io.tl.acquire.valid && acq_has_data)
when(io.mem.req_data.ready && (io.tl.release.valid || io.tl.acquire.valid)) {
active_out := !io.mem.req_cmd.ready || io.mem.req_data.valid
io.mem.req_cmd.valid := Bool(true)
cmd_sent_out := io.mem.req_cmd.ready
tl_done_out := tl_wrap_out
when(io.tl.release.valid) {
data_from_rel := Bool(true)
make_grant_ack := io.tl.release.bits.requiresAck()
io.mem.req_data.bits.data := io.tl.release.bits.data
val tag = Cat(io.tl.release.bits.client_id,
io.tl.release.bits.client_xact_id,
io.tl.release.bits.isVoluntary())
val addr = io.tl.release.bits.addr_block
io.mem.req_cmd.bits.tag := tag
io.mem.req_cmd.bits.addr := addr
io.mem.req_cmd.bits.rw := rel_has_data
tag_out := tag
addr_out := addr
has_data := rel_has_data
} .elsewhen(io.tl.acquire.valid) {
data_from_rel := Bool(false)
make_grant_ack := acq_has_data // i.e. is it a Put
io.mem.req_data.bits.data := io.tl.acquire.bits.data
io.mem.req_cmd.bits.rw := acq_has_data
val tag = Cat(io.tl.acquire.bits.client_id,
io.tl.acquire.bits.client_xact_id,
io.tl.acquire.bits.isBuiltInType())
val addr = io.tl.acquire.bits.addr_block
io.mem.req_cmd.bits.tag := tag
io.mem.req_cmd.bits.addr := addr
io.mem.req_cmd.bits.rw := acq_has_data
tag_out := tag
addr_out := addr
has_data := acq_has_data
}
}
}
when(active_out) {
io.mem.req_cmd.valid := !cmd_sent_out
cmd_sent_out := cmd_sent_out || io.mem.req_cmd.fire()
when(has_data && !tl_done_out) {
when(data_from_rel) {
io.tl.release.ready := io.mem.req_data.ready
io.mem.req_data.valid := io.tl.release.valid
} .otherwise {
io.tl.acquire.ready := io.mem.req_data.ready
io.mem.req_data.valid := io.tl.acquire.valid
}
}
when(tl_wrap_out) { tl_done_out := Bool(true) }
when(tl_done_out && make_grant_ack) {
gnt_arb.io.in(1).valid := Bool(true) // TODO: grants for voluntary acks?
when(gnt_arb.io.in(1).ready) { make_grant_ack := Bool(false) }
}
when(cmd_sent_out && (!has_data || tl_done_out) && !make_grant_ack) {
active_out := Bool(false)
}
}
}
// Aggregate incoming MemIO responses into TL Grants
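// Responses carry back the tag issued with req_cmd. If the beat widths
// differ, MIF beats are buffered in mif_buf_in and re-sliced into TL-sized
// beats, with each Grant beat released once enough response bits have
// arrived; otherwise each MemIO response flows directly into a Grant beat.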
val active_in = Reg(init=Bool(false))
val (tl_cnt_in, tl_wrap_in) = Counter(io.tl.grant.fire() && io.tl.grant.bits.hasMultibeatData(), tlDataBeats)
val tag_in = Reg(UInt(width = mifTagBits))
if(tlDataBits != mifDataBits || tlDataBeats != mifDataBeats) {
val (mif_cnt_in, mif_wrap_in) = Counter(io.mem.resp.fire(), mifDataBeats) // TODO: Assumes all resps have data
val mif_done_in = Reg(init=Bool(false))
val mif_buf_in = Reg(Vec(new MemData, mifDataBeats))
val tl_buf_in = Vec.fill(tlDataBeats){ io.tl.acquire.bits.data }
tl_buf_in := tl_buf_in.fromBits(mif_buf_in.toBits)
val tl_prog_in = (tl_cnt_in+UInt(1, width = log2Up(tlDataBeats+1)))*UInt(tlDataBits)
val mif_prog_in = mif_cnt_in*UInt(mifDataBits)
gnt_arb.io.in(0).bits := Grant(
dst = (if(dstIdBits > 0) tag_in(dst_off, tlClientXactIdBits + 1) else UInt(0)),
is_builtin_type = tag_in(0),
g_type = Mux(tag_in(0), Grant.getDataBlockType, UInt(0)), // TODO: Assumes MI or MEI protocol
client_xact_id = tag_in >> UInt(1),
manager_xact_id = UInt(0),
addr_beat = tl_cnt_in,
data = tl_buf_in(tl_cnt_in))
when(!active_in) {
io.mem.resp.ready := Bool(true)
when(io.mem.resp.valid) {
active_in := Bool(true)
mif_done_in := mif_wrap_in
tag_in := io.mem.resp.bits.tag
mif_buf_in(tl_cnt_in).data := io.mem.resp.bits.data
}
}
when(active_in) {
gnt_arb.io.in(0).valid := mif_done_in || tl_prog_in <= mif_prog_in
when(!mif_done_in) {
io.mem.resp.ready := Bool(true)
when(io.mem.resp.valid) {
mif_buf_in(mif_cnt_in).data := io.mem.resp.bits.data
}
}
when(mif_wrap_in) { mif_done_in := Bool(true) }
when(tl_wrap_in) { active_in := Bool(false) }
}
} else { // Don't generate all the unneeded data buffers and flow resp
gnt_arb.io.in(0).valid := io.mem.resp.valid
io.mem.resp.ready := gnt_arb.io.in(0).ready
gnt_arb.io.in(0).bits := Grant(
dst = (if(dstIdBits > 0) io.mem.resp.bits.tag(dst_off, tlClientXactIdBits + 1) else UInt(0)),
is_builtin_type = io.mem.resp.bits.tag(0),
g_type = Mux(io.mem.resp.bits.tag(0), Grant.getDataBlockType, UInt(0)), // TODO: Assumes MI or MEI protocol
client_xact_id = io.mem.resp.bits.tag >> UInt(1),
manager_xact_id = UInt(0),
addr_beat = tl_cnt_in,
data = io.mem.resp.bits.data)
}
}