Fix toBits/toUInt/toSInt deprecation warnings
parent a6e009d8de
commit 832e56d3c7
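The changes are mechanical: each deprecated Chisel 2 cast is replaced by its Chisel 3-compatible equivalent (toBits -> asUInt, toUInt -> asUInt, toSInt -> asSInt, BitPat.DC -> BitPat.dontCare). A minimal sketch of the pattern, modelled on the MaskMaker change in the first hunk (module and signal names here are hypothetical, for illustration only):

    import Chisel._

    class MaskExample extends Module {
      val io = new Bundle {
        val bits = UInt(INPUT, 3)
        val mask = UInt(OUTPUT, 8)
      }
      // Before: Vec.tabulate(8) { UInt(_) < io.bits }.toBits().asUInt()
      // After: a Vec[Bool] packs directly into a UInt with asUInt
      io.mask := Vec.tabulate(8) { UInt(_) < io.bits }.asUInt
    }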
@@ -57,7 +57,7 @@ object NoiseMaker
object MaskMaker
{
def apply(wide: Int, bits: UInt): UInt =
- Vec.tabulate(wide) {UInt(_) < bits} .toBits() .asUInt()
+ Vec.tabulate(wide) {UInt(_) < bits} .asUInt
}

class ComparatorSource(implicit val p: Parameters) extends Module
@@ -467,12 +467,12 @@ class HastiTestSRAM(depth: Int)(implicit p: Parameters) extends HastiModule()(p)
val mask_decode = Vec.tabulate(hastiAlignment+1) (UInt(_) <= io.hsize)
val mask_wide = Vec.tabulate(hastiDataBytes) { i => mask_decode(log2Up(i+1)) }
val mask_shift = if (hastiAlignment == 0) UInt(1) else
- mask_wide.toBits().asUInt() << io.haddr(hastiAlignment-1,0)
+ mask_wide.asUInt() << io.haddr(hastiAlignment-1,0)

// The request had better have been aligned! (AHB-lite requires this)
if (hastiAlignment >= 1) {
assert (io.htrans === HTRANS_IDLE || io.htrans === HTRANS_BUSY ||
- (io.haddr & mask_decode.toBits()(hastiAlignment,1).asUInt) === UInt(0),
+ (io.haddr & mask_decode.asUInt()(hastiAlignment,1)) === UInt(0),
"HASTI request not aligned")
}

@@ -543,7 +543,7 @@ class HastiTestSRAM(depth: Int)(implicit p: Parameters) extends HastiModule()(p)
map { case (m, p) => Mux(d_read && ready && m, p, Bits(0)) })

// Finally, the outputs
- io.hrdata := outdata.toBits()
+ io.hrdata := outdata.asUInt
io.hready := ready
io.hresp := HRESP_OKAY
}
@@ -66,8 +66,8 @@ class MemSerdes(w: Int)(implicit p: Parameters) extends MIFModule
val wide = new MemIO().flip
val narrow = new MemSerializedIO(w)
}
- val abits = io.wide.req_cmd.bits.toBits.getWidth
- val dbits = io.wide.req_data.bits.toBits.getWidth
+ val abits = io.wide.req_cmd.bits.asUInt.getWidth
+ val dbits = io.wide.req_data.bits.asUInt.getWidth
val rbits = io.wide.resp.bits.getWidth

val out_buf = Reg(Bits())
@@ -85,10 +85,10 @@ class MemSerdes(w: Int)(implicit p: Parameters) extends MIFModule
out_buf := out_buf >> UInt(w)
}
when (io.wide.req_cmd.valid && io.wide.req_cmd.ready) {
- out_buf := io.wide.req_cmd.bits.toBits
+ out_buf := io.wide.req_cmd.bits.asUInt
}
when (io.wide.req_data.valid && io.wide.req_data.ready) {
- out_buf := io.wide.req_data.bits.toBits
+ out_buf := io.wide.req_data.bits.asUInt
}

io.wide.req_cmd.ready := state === s_idle
@@ -143,8 +143,8 @@ class MemDesserIO(w: Int)(implicit p: Parameters) extends ParameterizedBundle()(
class MemDesser(w: Int)(implicit p: Parameters) extends Module // test rig side
{
val io = new MemDesserIO(w)
- val abits = io.wide.req_cmd.bits.toBits.getWidth
- val dbits = io.wide.req_data.bits.toBits.getWidth
+ val abits = io.wide.req_cmd.bits.asUInt.getWidth
+ val dbits = io.wide.req_data.bits.asUInt.getWidth
val rbits = io.wide.resp.bits.getWidth
val mifDataBeats = p(MIFDataBeats)

@@ -203,7 +203,7 @@ class MemDesser(w: Int)(implicit p: Parameters) extends Module // test rig side
dataq.io.deq.ready := recv_cnt === UInt((rbits-1)/w)

io.narrow.resp.valid := dataq.io.deq.valid
- io.narrow.resp.bits := dataq.io.deq.bits.toBits >> (recv_cnt * UInt(w))
+ io.narrow.resp.bits := dataq.io.deq.bits.asUInt >> (recv_cnt * UInt(w))
}

class MemIOArbiter(val arbN: Int)(implicit p: Parameters) extends MIFModule {
@@ -239,7 +239,7 @@ class MemIOArbiter(val arbN: Int)(implicit p: Parameters) extends MIFModule {
io.outer.resp.ready := Bool(false)
for (i <- 0 until arbN) {
io.inner(i).resp.valid := Bool(false)
- when(io.outer.resp.bits.tag(log2Up(arbN)-1,0).toUInt === UInt(i)) {
+ when(io.outer.resp.bits.tag(log2Up(arbN)-1,0) === UInt(i)) {
io.inner(i).resp.valid := io.outer.resp.valid
io.outer.resp.ready := io.inner(i).resp.ready
}
@@ -21,8 +21,8 @@ class SlowIO[T <: Data](val divisor_max: Int)(data: => T) extends Module
val hold = Reg(init=UInt(divisor_max/4-1))
val h_shadow = Reg(init=UInt(divisor_max/4-1))
when (io.set_divisor.valid) {
- d_shadow := io.set_divisor.bits(log2Up(divisor_max)-1, 0).toUInt
- h_shadow := io.set_divisor.bits(log2Up(divisor_max)-1+16, 16).toUInt
+ d_shadow := io.set_divisor.bits(log2Up(divisor_max)-1, 0)
+ h_shadow := io.set_divisor.bits(log2Up(divisor_max)-1+16, 16)
}
io.divisor := (hold << 16) | divisor

@@ -128,7 +128,7 @@ class SmiIONastiReadIOConverter(val dataWidth: Int, val addrWidth: Int)
io.nasti.r.valid := (state === s_resp)
io.nasti.r.bits := NastiReadDataChannel(
id = id,
- data = buffer.toBits,
+ data = buffer.asUInt,
last = (nBeats === UInt(0)))

when (io.nasti.ar.fire()) {
@@ -192,7 +192,7 @@ class SmiIONastiWriteIOConverter(val dataWidth: Int, val addrWidth: Int)
def makeStrobe(offset: UInt, size: UInt, strb: UInt) = {
val sizemask = (UInt(1) << (UInt(1) << size)) - UInt(1)
val bytemask = strb & (sizemask << offset)
- Vec.tabulate(maxWordsPerBeat){i => bytemask(dataBytes * i)}.toBits
+ Vec.tabulate(maxWordsPerBeat){i => bytemask(dataBytes * i)}.asUInt
}

val size = Reg(UInt(width = nastiXSizeBits))
@@ -139,7 +139,7 @@ class StreamExpander(win: Int, wout: Int) extends Module {

io.in.ready := collecting
io.out.valid := !collecting
- io.out.bits.data := buffer.toBits
+ io.out.bits.data := buffer.asUInt
io.out.bits.last := last
}

@@ -161,7 +161,7 @@ class Serializer[T <: Data with Serializable](w: Int, typ: T) extends Module {
}

val narrower = Module(new StreamNarrower(typ.nbits, w))
- narrower.io.in.bits.data := io.in.bits.toBits
+ narrower.io.in.bits.data := io.in.bits.asUInt
narrower.io.in.bits.last := Bool(true)
narrower.io.in.valid := io.in.valid
io.in.ready := narrower.io.in.ready
@@ -160,11 +160,11 @@ class BTB(implicit p: Parameters) extends BtbModule {
private def page(addr: UInt) = addr >> matchBits
private def pageMatch(addr: UInt) = {
val p = page(addr)
- pageValid & pages.map(_ === p).toBits
+ pageValid & pages.map(_ === p).asUInt
}
private def tagMatch(addr: UInt, pgMatch: UInt) = {
- val idxMatch = idxs.map(_ === addr(matchBits-1, log2Up(coreInstBytes))).toBits
- val idxPageMatch = idxPagesOH.map(_ & pgMatch).map(_.orR).toBits
+ val idxMatch = idxs.map(_ === addr(matchBits-1, log2Up(coreInstBytes))).asUInt
+ val idxPageMatch = idxPagesOH.map(_ & pgMatch).map(_.orR).asUInt
idxMatch & idxPageMatch & isValid
}

@@ -173,7 +173,7 @@ class BTB(implicit p: Parameters) extends BtbModule {

val pageHit = pageMatch(io.req.bits.addr)
val hitsVec = tagMatch(io.req.bits.addr, pageHit)
- val hits = hitsVec.toBits
+ val hits = hitsVec.asUInt
val updatePageHit = pageMatch(r_btb_update.bits.pc)

val updateHits = tagMatch(r_btb_update.bits.pc, updatePageHit)
@@ -91,7 +91,7 @@ object CSR
{
// commands
val SZ = 3
- val X = BitPat.DC(SZ)
+ val X = BitPat.dontCare(SZ)
val N = UInt(0,SZ)
val W = UInt(1,SZ)
val S = UInt(2,SZ)
@@ -164,7 +164,7 @@ class CSRFile(implicit p: Parameters) extends CoreModule()(p)
del.mtip := false
del.meip := false

- (sup.toBits, del.toBits)
+ (sup.asUInt, del.asUInt)
}
val delegable_exceptions = UInt(Seq(
Causes.misaligned_fetch,
@@ -214,7 +214,7 @@ class CSRFile(implicit p: Parameters) extends CoreModule()(p)

val mip = Wire(init=reg_mip)
mip.rocc := io.rocc.interrupt
- val read_mip = mip.toBits & supported_interrupts
+ val read_mip = mip.asUInt & supported_interrupts

val pending_interrupts = read_mip & reg_mie
val m_interrupts = Mux(!reg_debug && (reg_mstatus.prv < PRV.M || (reg_mstatus.prv === PRV.M && reg_mstatus.mie)), pending_interrupts & ~reg_mideleg, UInt(0))
@@ -244,11 +244,11 @@ class CSRFile(implicit p: Parameters) extends CoreModule()(p)
(if (usingRoCC) "X" else "")
val isa = (BigInt(log2Ceil(xLen) - 4) << (xLen-2)) |
isa_string.map(x => 1 << (x - 'A')).reduce(_|_)
- val read_mstatus = io.status.toBits()(xLen-1,0)
+ val read_mstatus = io.status.asUInt()(xLen-1,0)

val read_mapping = collection.mutable.LinkedHashMap[Int,Bits](
- CSRs.tdrselect -> reg_tdrselect.toBits,
- CSRs.tdrdata1 -> reg_bp(reg_tdrselect.tdrindex).control.toBits,
+ CSRs.tdrselect -> reg_tdrselect.asUInt,
+ CSRs.tdrdata1 -> reg_bp(reg_tdrselect.tdrindex).control.asUInt,
CSRs.tdrdata2 -> reg_bp(reg_tdrselect.tdrindex).address,
CSRs.mimpid -> UInt(0),
CSRs.marchid -> UInt(0),
@@ -273,9 +273,9 @@ class CSRFile(implicit p: Parameters) extends CoreModule()(p)
CSRs.mhartid -> io.prci.id)

if (usingDebug) {
- read_mapping += CSRs.dcsr -> reg_dcsr.toBits
- read_mapping += CSRs.dpc -> reg_dpc.toBits
- read_mapping += CSRs.dscratch -> reg_dscratch.toBits
+ read_mapping += CSRs.dcsr -> reg_dcsr.asUInt
+ read_mapping += CSRs.dpc -> reg_dpc.asUInt
+ read_mapping += CSRs.dscratch -> reg_dscratch.asUInt
}

if (usingFPU) {
@@ -297,13 +297,13 @@ class CSRFile(implicit p: Parameters) extends CoreModule()(p)
read_sstatus.mie := 0
read_sstatus.hie := 0

- read_mapping += CSRs.sstatus -> (read_sstatus.toBits())(xLen-1,0)
- read_mapping += CSRs.sip -> read_sip.toBits
- read_mapping += CSRs.sie -> read_sie.toBits
+ read_mapping += CSRs.sstatus -> (read_sstatus.asUInt())(xLen-1,0)
+ read_mapping += CSRs.sip -> read_sip.asUInt
+ read_mapping += CSRs.sie -> read_sie.asUInt
read_mapping += CSRs.sscratch -> reg_sscratch
read_mapping += CSRs.scause -> reg_scause
read_mapping += CSRs.sbadaddr -> reg_sbadaddr.sextTo(xLen)
- read_mapping += CSRs.sptbr -> reg_sptbr.toBits
+ read_mapping += CSRs.sptbr -> reg_sptbr.asUInt
read_mapping += CSRs.sepc -> reg_sepc.sextTo(xLen)
read_mapping += CSRs.stvec -> reg_stvec.sextTo(xLen)
read_mapping += CSRs.mscounteren -> UInt(0)
@@ -34,7 +34,7 @@ class DCacheDataArray(implicit p: Parameters) extends L1HellaCacheModule()(p) {
val data = Vec.tabulate(rowBytes)(i => io.req.bits.wdata(8*(i+1)-1, 8*i))
array.write(addr, data, io.req.bits.wmask.toBools)
}
- io.resp(w) := array.read(addr, valid && !io.req.bits.write).toBits
+ io.resp(w) := array.read(addr, valid && !io.req.bits.write).asUInt
}
}

@@ -116,9 +116,9 @@ class DCache(implicit p: Parameters) extends L1HellaCacheModule()(p) {

val s1_paddr = Cat(tlb.io.resp.ppn, s1_req.addr(pgIdxBits-1,0))
val s1_tag = Mux(s1_probe, probe_bits.addr_block >> idxBits, s1_paddr(paddrBits-1, untagBits))
- val s1_hit_way = meta.io.resp.map(r => r.coh.isValid() && r.tag === s1_tag).toBits
+ val s1_hit_way = meta.io.resp.map(r => r.coh.isValid() && r.tag === s1_tag).asUInt
val s1_hit_state = ClientMetadata.onReset.fromBits(
- meta.io.resp.map(r => Mux(r.tag === s1_tag, r.coh.toBits, UInt(0)))
+ meta.io.resp.map(r => Mux(r.tag === s1_tag, r.coh.asUInt, UInt(0)))
.reduce (_|_))
val s1_data_way = Mux(inWriteback, releaseWay, s1_hit_way)
val s1_data = Mux1H(s1_data_way, data.io.resp) // retime into s2 if critical
@@ -25,20 +25,20 @@ object DecodeLogic
for (u <- t.tail)
assert(!t.head._2.intersects(u._2), "DecodeLogic: keys " + t.head + " and " + u + " overlap")

- (0 until default.getWidth.max(values.map(_.getWidth).max)).map({ case (i: Int) =>
+ Cat((0 until default.getWidth.max(values.map(_.getWidth).max)).map({ case (i: Int) =>
val mint = termvalues.filter { case (k,t) => ((t.mask >> i) & 1) == 0 && ((t.value >> i) & 1) == 1 }.map(_._1)
val maxt = termvalues.filter { case (k,t) => ((t.mask >> i) & 1) == 0 && ((t.value >> i) & 1) == 0 }.map(_._1)
val dc = termvalues.filter { case (k,t) => ((t.mask >> i) & 1) == 1 }.map(_._1)

if (((dterm.mask >> i) & 1) != 0) {
- logic(addr, addrWidth, cache, SimplifyDC(mint, maxt, addrWidth)).toBits
+ logic(addr, addrWidth, cache, SimplifyDC(mint, maxt, addrWidth))
} else {
val defbit = (dterm.value.toInt >> i) & 1
val t = if (defbit == 0) mint else maxt
- val bit = logic(addr, addrWidth, cache, Simplify(t, dc, addrWidth)).toBits
+ val bit = logic(addr, addrWidth, cache, Simplify(t, dc, addrWidth))
if (defbit == 0) bit else ~bit
}
- }).reverse.reduceRight(Cat(_,_))
+ }).reverse)
}
def apply(addr: UInt, default: Seq[BitPat], mappingIn: Iterable[(BitPat, Seq[BitPat])]): Seq[UInt] = {
val mapping = collection.mutable.ArrayBuffer.fill(default.size)(collection.mutable.ArrayBuffer[(BitPat, BitPat)]())
@@ -51,7 +51,7 @@ object DecodeLogic
def apply(addr: UInt, default: Seq[BitPat], mappingIn: List[(UInt, Seq[BitPat])]): Seq[UInt] =
apply(addr, default, mappingIn.map(m => (BitPat(m._1), m._2)).asInstanceOf[Iterable[(BitPat, Seq[BitPat])]])
def apply(addr: UInt, trues: Iterable[UInt], falses: Iterable[UInt]): Bool =
- apply(addr, BitPat.DC(1), trues.map(BitPat(_) -> BitPat("b1")) ++ falses.map(BitPat(_) -> BitPat("b0"))).toBool
+ apply(addr, BitPat.dontCare(1), trues.map(BitPat(_) -> BitPat("b1")) ++ falses.map(BitPat(_) -> BitPat("b0"))).toBool
private val caches = collection.mutable.Map[UInt,collection.mutable.Map[Term,Bool]]()
}

@@ -154,7 +154,7 @@ class DmaFrontend(implicit p: Parameters) extends CoreModule()(p)
source = source,
dest = dest,
length = length,
- size = size).toBits
+ size = size).asUInt
val data_beats = Vec(tlDataBeats, UInt(width = tlDataBits)).fromBits(data_blob)
val base_addr = addrMap("devices:dma").start
val addr_block = UInt(base_addr >> (tlBeatAddrBits + tlByteAddrBits))
@@ -341,7 +341,7 @@ class DmaCtrlRegFile(implicit val p: Parameters) extends Module
io.word_size := regs(WORD_SIZE)

when (io.wen && waddr < UInt(nWriteRegs)) {
- regs.write(waddr, io.wdata)
+ regs(waddr) := io.wdata
}

val outstanding_cnt = TwoWayCounter(
@@ -77,7 +77,7 @@ class ALU(implicit p: Parameters) extends CoreModule()(p) {
(shamt, Cat(shin_hi, io.in1(31,0)))
}
val shin = Mux(io.fn === FN_SR || io.fn === FN_SRA, shin_r, Reverse(shin_r))
- val shout_r = (Cat(isSub(io.fn) & shin(xLen-1), shin).toSInt >> shamt)(xLen-1,0)
+ val shout_r = (Cat(isSub(io.fn) & shin(xLen-1), shin).asSInt >> shamt)(xLen-1,0)
val shout_l = Reverse(shout_r)
val shout = Mux(io.fn === FN_SR || io.fn === FN_SRA, shout_r, UInt(0)) |
Mux(io.fn === FN_SL, shout_l, UInt(0))
@@ -274,7 +274,7 @@ class FPToInt extends Module
io.out.bits.exc := dcmp_exc
}
when (in.cmd === FCMD_CVT_IF) {
- io.out.bits.toint := Mux(in.typ(1), d2l.io.out.toSInt, d2w.io.out.toSInt).toUInt
+ io.out.bits.toint := Mux(in.typ(1), d2l.io.out.asSInt, d2w.io.out.asSInt).asUInt
val dflags = Mux(in.typ(1), d2l.io.intExceptionFlags, d2w.io.intExceptionFlags)
io.out.bits.exc := Cat(dflags(2, 1).orR, UInt(0, 3), dflags(0))
}
@@ -301,16 +301,16 @@ class IntToFP(val latency: Int) extends Module
}

val longValue =
- Mux(in.bits.typ(1), in.bits.in1.toSInt,
- Mux(in.bits.typ(0), in.bits.in1(31,0).zext, in.bits.in1(31,0).toSInt))
+ Mux(in.bits.typ(1), in.bits.in1.asSInt,
+ Mux(in.bits.typ(0), in.bits.in1(31,0).zext, in.bits.in1(31,0).asSInt))
val l2s = Module(new hardfloat.INToRecFN(64, 8, 24))
l2s.io.signedIn := ~in.bits.typ(0)
- l2s.io.in := longValue.toUInt
+ l2s.io.in := longValue.asUInt
l2s.io.roundingMode := in.bits.rm

val l2d = Module(new hardfloat.INToRecFN(64, 11, 53))
l2d.io.signedIn := ~in.bits.typ(0)
- l2d.io.in := longValue.toUInt
+ l2d.io.in := longValue.asUInt
l2d.io.roundingMode := in.bits.rm

when (in.bits.cmd === FCMD_CVT_FI) {
@@ -539,14 +539,21 @@ class FPU(implicit p: Parameters) extends CoreModule()(p) {
val maxLatency = pipes.map(_.lat).max
val memLatencyMask = latencyMask(mem_ctrl, 2)

+ class WBInfo extends Bundle {
+ val rd = UInt(width = 5)
+ val single = Bool()
+ val cp = Bool()
+ val pipeid = UInt(width = log2Ceil(pipes.size))
+ override def cloneType: this.type = new WBInfo().asInstanceOf[this.type]
+ }

val wen = Reg(init=Bits(0, maxLatency-1))
- val winfo = Reg(Vec(maxLatency-1, Bits()))
+ val wbInfo = Reg(Vec(maxLatency-1, new WBInfo))
val mem_wen = mem_reg_valid && (mem_ctrl.fma || mem_ctrl.fastpipe || mem_ctrl.fromint)
val write_port_busy = RegEnable(mem_wen && (memLatencyMask & latencyMask(ex_ctrl, 1)).orR || (wen & latencyMask(ex_ctrl, 0)).orR, req_valid)
val mem_winfo = Cat(mem_cp_valid, pipeid(mem_ctrl), mem_ctrl.single, mem_reg_inst(11,7)) //single only used for debugging

for (i <- 0 until maxLatency-2) {
- when (wen(i+1)) { winfo(i) := winfo(i+1) }
+ when (wen(i+1)) { wbInfo(i) := wbInfo(i+1) }
}
wen := wen >> 1
when (mem_wen) {
@@ -555,27 +562,27 @@ class FPU(implicit p: Parameters) extends CoreModule()(p) {
}
for (i <- 0 until maxLatency-1) {
when (!write_port_busy && memLatencyMask(i)) {
- winfo(i) := mem_winfo
+ wbInfo(i).cp := mem_cp_valid
+ wbInfo(i).single := mem_ctrl.single
+ wbInfo(i).pipeid := pipeid(mem_ctrl)
+ wbInfo(i).rd := mem_reg_inst(11,7)
}
}
}

- val waddr = Mux(divSqrt_wen, divSqrt_waddr, winfo(0)(4,0).toUInt)
- val wsrc = (winfo(0) >> 6)(log2Up(pipes.size) - 1,0)
- val wcp = winfo(0)(6+log2Up(pipes.size))
- val wdata = Mux(divSqrt_wen, divSqrt_wdata, (pipes.map(_.res.data): Seq[UInt])(wsrc))
- val wexc = (pipes.map(_.res.exc): Seq[UInt])(wsrc)
- when ((!wcp && wen(0)) || divSqrt_wen) {
+ val waddr = Mux(divSqrt_wen, divSqrt_waddr, wbInfo(0).rd)
+ val wdata = Mux(divSqrt_wen, divSqrt_wdata, (pipes.map(_.res.data): Seq[UInt])(wbInfo(0).pipeid))
+ val wexc = (pipes.map(_.res.exc): Seq[UInt])(wbInfo(0).pipeid)
+ when ((!wbInfo(0).cp && wen(0)) || divSqrt_wen) {
regfile(waddr) := wdata
if (enableCommitLog) {
val wdata_unrec_s = hardfloat.fNFromRecFN(8, 24, wdata(64,0))
val wdata_unrec_d = hardfloat.fNFromRecFN(11, 53, wdata(64,0))
- val wb_single = (winfo(0) >> 5)(0)
printf ("f%d p%d 0x%x\n", waddr, waddr+ UInt(32),
- Mux(wb_single, Cat(UInt(0,32), wdata_unrec_s), wdata_unrec_d))
+ Mux(wbInfo(0).single, Cat(UInt(0,32), wdata_unrec_s), wdata_unrec_d))
}
}
- when (wcp && wen(0)) {
+ when (wbInfo(0).cp && wen(0)) {
io.cp_resp.bits.data := wdata
io.cp_resp.valid := Bool(true)
}
@@ -595,7 +602,7 @@ class FPU(implicit p: Parameters) extends CoreModule()(p) {
io.dec <> fp_decoder.io.sigs
def useScoreboard(f: ((Pipe, Int)) => Bool) = pipes.zipWithIndex.filter(_._1.lat > 3).map(x => f(x)).fold(Bool(false))(_||_)
io.sboard_set := wb_reg_valid && !wb_cp_valid && Reg(next=useScoreboard(_._1.cond(mem_ctrl)) || mem_ctrl.div || mem_ctrl.sqrt)
- io.sboard_clr := !wb_cp_valid && (divSqrt_wen || (wen(0) && useScoreboard(x => wsrc === UInt(x._2))))
+ io.sboard_clr := !wb_cp_valid && (divSqrt_wen || (wen(0) && useScoreboard(x => wbInfo(0).pipeid === UInt(x._2))))
io.sboard_clra := waddr
// we don't currently support round-max-magnitude (rm=4)
io.illegal_rm := ex_rm(2) && ex_ctrl.round
@@ -57,7 +57,7 @@ class Frontend(implicit p: Parameters) extends CoreModule()(p) with HasL1CachePa
val predicted_npc = Wire(init = ntpc)
val predicted_taken = Wire(init = Bool(false))
val icmiss = s2_valid && !icache.io.resp.valid
- val npc = Mux(icmiss, s2_pc, predicted_npc).toUInt
+ val npc = Mux(icmiss, s2_pc, predicted_npc)
val s0_same_block = !predicted_taken && !icmiss && !io.cpu.req.valid && ntpc_same_block

val stall = io.cpu.resp.valid && !io.cpu.resp.ready
@@ -49,7 +49,7 @@ class ICache(latency: Int)(implicit p: Parameters) extends CoreModule()(p) with

val s1_valid = Reg(init=Bool(false))
val s1_vaddr = Reg(UInt())
- val s1_paddr = Cat(io.s1_ppn, s1_vaddr(pgIdxBits-1,0)).toUInt
+ val s1_paddr = Cat(io.s1_ppn, s1_vaddr(pgIdxBits-1,0))
val s1_tag = s1_paddr(tagBits+untagBits-1,untagBits)

val s0_valid = io.req.valid || s1_valid && stall
@@ -81,7 +81,7 @@ class ICache(latency: Int)(implicit p: Parameters) extends CoreModule()(p) with
val tag_array = SeqMem(nSets, Vec(nWays, Bits(width = entagbits)))
val tag_rdata = tag_array.read(s0_vaddr(untagBits-1,blockOffBits), !refill_done && s0_valid)
when (refill_done) {
- val tag = code.encode(refill_tag).toUInt
+ val tag = code.encode(refill_tag)
tag_array.write(s1_idx, Vec.fill(nWays)(tag), Vec.tabulate(nWays)(repl_way === _))
}

@@ -115,7 +115,7 @@ class ICache(latency: Int)(implicit p: Parameters) extends CoreModule()(p) with
val data_array = SeqMem(nSets * refillCycles, Bits(width = code.width(rowBits)))
val wen = narrow_grant.valid && repl_way === UInt(i)
when (wen) {
- val e_d = code.encode(narrow_grant.bits.data).toUInt
+ val e_d = code.encode(narrow_grant.bits.data)
data_array.write((s1_idx << log2Ceil(refillCycles)) | refill_cnt, e_d)
}
val s0_raddr = s0_vaddr(untagBits-1,blockOffBits-log2Ceil(refillCycles))
@@ -94,10 +94,10 @@ class MulDiv(
when (state === s_busy && isMul) {
val mulReg = Cat(remainder(2*mulw+1,w+1),remainder(w-1,0))
val mplier = mulReg(mulw-1,0)
- val accum = mulReg(2*mulw,mulw).toSInt
- val mpcand = divisor.toSInt
+ val accum = mulReg(2*mulw,mulw).asSInt
+ val mpcand = divisor.asSInt
val prod = mplier(unroll-1,0) * mpcand + accum
- val nextMulReg = Cat(prod, mplier(mulw-1,unroll)).toUInt
+ val nextMulReg = Cat(prod, mplier(mulw-1,unroll))

val eOutMask = (SInt(BigInt(-1) << mulw) >> (count * unroll)(log2Up(mulw)-1,0))(mulw-1,0)
val eOut = Bool(earlyOut) && count =/= mulw/unroll-1 && count =/= 0 &&
@@ -408,7 +408,7 @@ class MSHR(id: Int)(implicit p: Parameters) extends L1HellaCacheModule()(p) {

io.mem_req.valid := state === s_refill_req && fq.io.enq.ready
io.mem_req.bits := req.old_meta.coh.makeAcquire(
- addr_block = Cat(io.tag, req_idx).toUInt,
+ addr_block = Cat(io.tag, req_idx),
client_xact_id = Bits(id),
op_code = req.cmd)

@@ -419,7 +419,7 @@ class MSHR(id: Int)(implicit p: Parameters) extends L1HellaCacheModule()(p) {
io.replay.valid := state === s_drain_rpq && rpq.io.deq.valid
io.replay.bits := rpq.io.deq.bits
io.replay.bits.phys := Bool(true)
- io.replay.bits.addr := Cat(io.tag, req_idx, rpq.io.deq.bits.addr(blockOffBits-1,0)).toUInt
+ io.replay.bits.addr := Cat(io.tag, req_idx, rpq.io.deq.bits.addr(blockOffBits-1,0))

when (!io.meta_read.ready) {
rpq.io.deq.ready := Bool(false)
@@ -766,14 +766,14 @@ class DataArray(implicit p: Parameters) extends L1HellaCacheModule()(p) {
val data = Vec.fill(rowWords)(io.write.bits.data(encDataBits*(p+1)-1,encDataBits*p))
array.write(waddr, data, wway_en.toBools)
}
- resp(p) := array.read(raddr, rway_en.orR && io.read.valid).toBits
+ resp(p) := array.read(raddr, rway_en.orR && io.read.valid).asUInt
}
for (dw <- 0 until rowWords) {
val r = Vec(resp.map(_(encDataBits*(dw+1)-1,encDataBits*dw)))
val resp_mux =
if (r.size == 1) r
else Vec(r(r_raddr(rowOffBits-1,wordOffBits)), r.tail:_*)
- io.resp(w+dw) := resp_mux.toBits
+ io.resp(w+dw) := resp_mux.asUInt
}
}
} else {
@@ -783,7 +783,7 @@ class DataArray(implicit p: Parameters) extends L1HellaCacheModule()(p) {
val data = Vec.tabulate(rowWords)(i => io.write.bits.data(encDataBits*(i+1)-1,encDataBits*i))
array.write(waddr, data, io.write.bits.wmask.toBools)
}
- io.resp(w) := array.read(raddr, io.read.bits.way_en(w) && io.read.valid).toBits
+ io.resp(w) := array.read(raddr, io.read.bits.way_en(w) && io.read.valid).asUInt
}
}

@@ -893,7 +893,7 @@ class HellaCache(implicit p: Parameters) extends L1HellaCacheModule()(p) {
writeArb.io.out.ready := data.io.write.ready
data.io.write.bits := writeArb.io.out.bits
val wdata_encoded = (0 until rowWords).map(i => code.encode(writeArb.io.out.bits.data(coreDataBits*(i+1)-1,coreDataBits*i)))
- data.io.write.bits.data := wdata_encoded.toBits
+ data.io.write.bits.data := wdata_encoded.asUInt

// tag read for new requests
metaReadArb.io.in(4).valid := io.cpu.req.valid
@@ -915,8 +915,8 @@ class HellaCache(implicit p: Parameters) extends L1HellaCacheModule()(p) {

// tag check and way muxing
def wayMap[T <: Data](f: Int => T) = Vec((0 until nWays).map(f))
- val s1_tag_eq_way = wayMap((w: Int) => meta.io.resp(w).tag === (s1_addr >> untagBits)).toBits
- val s1_tag_match_way = wayMap((w: Int) => s1_tag_eq_way(w) && meta.io.resp(w).coh.isValid()).toBits
+ val s1_tag_eq_way = wayMap((w: Int) => meta.io.resp(w).tag === (s1_addr >> untagBits)).asUInt
+ val s1_tag_match_way = wayMap((w: Int) => s1_tag_eq_way(w) && meta.io.resp(w).coh.isValid()).asUInt
s1_clk_en := metaReadArb.io.out.valid //TODO: should be metaReadArb.io.out.fire(), but triggers Verilog backend bug
val s1_writeback = s1_clk_en && !s1_valid && !s1_replay
val s2_tag_match_way = RegEnable(s1_tag_match_way, s1_clk_en)
@@ -953,14 +953,14 @@ class HellaCache(implicit p: Parameters) extends L1HellaCacheModule()(p) {
val en = en1 && ((Bool(i == 0) || !Bool(doNarrowRead)) || s1_writeback)
when (en) { regs(i) := data.io.resp(w) >> encDataBits*i }
}
- s2_data(w) := regs.toBits
+ s2_data(w) := regs.asUInt
}
val s2_data_muxed = Mux1H(s2_tag_match_way, s2_data)
val s2_data_decoded = (0 until rowWords).map(i => code.decode(s2_data_muxed(encDataBits*(i+1)-1,encDataBits*i)))
- val s2_data_corrected = s2_data_decoded.map(_.corrected).toBits
- val s2_data_uncorrected = s2_data_decoded.map(_.uncorrected).toBits
+ val s2_data_corrected = s2_data_decoded.map(_.corrected).asUInt
+ val s2_data_uncorrected = s2_data_decoded.map(_.uncorrected).asUInt
val s2_word_idx = if(doNarrowRead) UInt(0) else s2_req.addr(log2Up(rowWords*coreDataBytes)-1,log2Up(wordBytes))
- val s2_data_correctable = s2_data_decoded.map(_.correctable).toBits()(s2_word_idx)
+ val s2_data_correctable = s2_data_decoded.map(_.correctable).asUInt()(s2_word_idx)

// store/amo hits
s3_valid := (s2_valid_masked && s2_hit || s2_replay) && !s2_sc_fail && isWrite(s2_req.cmd)
@@ -95,7 +95,7 @@ class PTW(n: Int)(implicit p: Parameters) extends CoreModule()(p) {
when ((tmp.ppn >> ppnBits) =/= 0) { res.v := false }
res
}
- val pte_addr = Cat(r_pte.ppn, vpn_idx).toUInt << log2Up(xLen/8)
+ val pte_addr = Cat(r_pte.ppn, vpn_idx) << log2Up(xLen/8)

when (arb.io.out.fire()) {
r_req := arb.io.out.bits
@@ -110,7 +110,7 @@ class PTW(n: Int)(implicit p: Parameters) extends CoreModule()(p) {
val tags = Reg(Vec(size, UInt(width = paddrBits)))
val data = Reg(Vec(size, UInt(width = ppnBits)))

- val hits = tags.map(_ === pte_addr).toBits & valid
+ val hits = tags.map(_ === pte_addr).asUInt & valid
val hit = hits.orR
when (io.mem.resp.valid && pte.table() && !hit) {
val r = Mux(valid.andR, plru.replace, PriorityEncoder(~valid))
@@ -138,7 +138,7 @@ class PTW(n: Int)(implicit p: Parameters) extends CoreModule()(p) {
io.mem.req.bits.cmd := Mux(state === s_set_dirty, M_XA_OR, M_XRD)
io.mem.req.bits.typ := MT_D
io.mem.req.bits.addr := pte_addr
- io.mem.s1_data := pte_wdata.toBits
+ io.mem.s1_data := pte_wdata.asUInt
io.mem.s1_kill := Bool(false)
io.mem.invalidate_lr := Bool(false)

@@ -113,12 +113,12 @@ class RegFile(n: Int, w: Int, zero: Boolean = false) {

object ImmGen {
def apply(sel: UInt, inst: UInt) = {
- val sign = Mux(sel === IMM_Z, SInt(0), inst(31).toSInt)
- val b30_20 = Mux(sel === IMM_U, inst(30,20).toSInt, sign)
- val b19_12 = Mux(sel =/= IMM_U && sel =/= IMM_UJ, sign, inst(19,12).toSInt)
+ val sign = Mux(sel === IMM_Z, SInt(0), inst(31).asSInt)
+ val b30_20 = Mux(sel === IMM_U, inst(30,20).asSInt, sign)
+ val b19_12 = Mux(sel =/= IMM_U && sel =/= IMM_UJ, sign, inst(19,12).asSInt)
val b11 = Mux(sel === IMM_U || sel === IMM_Z, SInt(0),
- Mux(sel === IMM_UJ, inst(20).toSInt,
- Mux(sel === IMM_SB, inst(7).toSInt, sign)))
+ Mux(sel === IMM_UJ, inst(20).asSInt,
+ Mux(sel === IMM_SB, inst(7).asSInt, sign)))
val b10_5 = Mux(sel === IMM_U || sel === IMM_Z, Bits(0), inst(30,25))
val b4_1 = Mux(sel === IMM_U, Bits(0),
Mux(sel === IMM_S || sel === IMM_SB, inst(11,8),
@@ -127,7 +127,7 @@ object ImmGen {
Mux(sel === IMM_I, inst(20),
Mux(sel === IMM_Z, inst(15), Bits(0))))

- Cat(sign, b30_20, b19_12, b11, b10_5, b4_1, b0).toSInt
+ Cat(sign, b30_20, b19_12, b11, b10_5, b4_1, b0).asSInt
}
}

@@ -285,18 +285,18 @@ class Rocket(implicit p: Parameters) extends CoreModule()(p) {
yield Mux(ex_reg_rs_bypass(i), bypass_mux(ex_reg_rs_lsb(i)), Cat(ex_reg_rs_msb(i), ex_reg_rs_lsb(i)))
val ex_imm = ImmGen(ex_ctrl.sel_imm, ex_reg_inst)
val ex_op1 = MuxLookup(ex_ctrl.sel_alu1, SInt(0), Seq(
- A1_RS1 -> ex_rs(0).toSInt,
- A1_PC -> ex_reg_pc.toSInt))
+ A1_RS1 -> ex_rs(0).asSInt,
+ A1_PC -> ex_reg_pc.asSInt))
val ex_op2 = MuxLookup(ex_ctrl.sel_alu2, SInt(0), Seq(
- A2_RS2 -> ex_rs(1).toSInt,
+ A2_RS2 -> ex_rs(1).asSInt,
A2_IMM -> ex_imm,
A2_SIZE -> Mux(ex_reg_rvc, SInt(2), SInt(4))))

val alu = Module(new ALU)
alu.io.dw := ex_ctrl.alu_dw
alu.io.fn := ex_ctrl.alu_fn
- alu.io.in2 := ex_op2.toUInt
- alu.io.in1 := ex_op1.toUInt
+ alu.io.in2 := ex_op2.asUInt
+ alu.io.in1 := ex_op1.asUInt

// multiplier and divider
val div = Module(new MulDiv(width = xLen,
@@ -372,14 +372,14 @@ class Rocket(implicit p: Parameters) extends CoreModule()(p) {

// memory stage
val mem_br_taken = mem_reg_wdata(0)
- val mem_br_target = mem_reg_pc.toSInt +
+ val mem_br_target = mem_reg_pc.asSInt +
Mux(mem_ctrl.branch && mem_br_taken, ImmGen(IMM_SB, mem_reg_inst),
Mux(mem_ctrl.jal, ImmGen(IMM_UJ, mem_reg_inst),
Mux(mem_reg_rvc, SInt(2), SInt(4))))
- val mem_npc = (Mux(mem_ctrl.jalr, encodeVirtualAddress(mem_reg_wdata, mem_reg_wdata).toSInt, mem_br_target) & SInt(-2)).toUInt
+ val mem_npc = (Mux(mem_ctrl.jalr, encodeVirtualAddress(mem_reg_wdata, mem_reg_wdata).asSInt, mem_br_target) & SInt(-2)).asUInt
val mem_wrong_npc = Mux(ex_pc_valid, mem_npc =/= ex_reg_pc, Mux(ibuf.io.inst(0).valid, mem_npc =/= ibuf.io.pc, Bool(true)))
val mem_npc_misaligned = if (usingCompressed) Bool(false) else mem_npc(1)
- val mem_int_wdata = Mux(!mem_reg_xcpt && (mem_ctrl.jalr ^ mem_npc_misaligned), mem_br_target, mem_reg_wdata.toSInt).toUInt
+ val mem_int_wdata = Mux(!mem_reg_xcpt && (mem_ctrl.jalr ^ mem_npc_misaligned), mem_br_target, mem_reg_wdata.asSInt).asUInt
val mem_cfi = mem_ctrl.branch || mem_ctrl.jalr || mem_ctrl.jal
val mem_cfi_taken = (mem_ctrl.branch && mem_br_taken) || mem_ctrl.jalr || mem_ctrl.jal
val mem_misprediction =
@@ -569,9 +569,9 @@ class Rocket(implicit p: Parameters) extends CoreModule()(p) {
io.imem.req.valid := take_pc
io.imem.req.bits.speculative := !take_pc_wb
io.imem.req.bits.pc :=
- Mux(wb_xcpt || csr.io.eret, csr.io.evec, // exception or [m|s]ret
- Mux(replay_wb, wb_reg_pc, // replay
- mem_npc)).toUInt // mispredicted branch
+ Mux(wb_xcpt || csr.io.eret, csr.io.evec, // exception or [m|s]ret
+ Mux(replay_wb, wb_reg_pc, // replay
+ mem_npc)) // mispredicted branch
io.imem.flush_icache := wb_reg_valid && wb_ctrl.fence_i && !io.dmem.s2_nack
io.imem.flush_tlb := csr.io.fatc

@@ -676,10 +676,10 @@ class Rocket(implicit p: Parameters) extends CoreModule()(p) {
// efficient means to compress 64-bit VA into vaddrBits+1 bits
// (VA is bad if VA(vaddrBits) != VA(vaddrBits-1))
val a = a0 >> vaddrBits-1
- val e = ea(vaddrBits,vaddrBits-1).toSInt
+ val e = ea(vaddrBits,vaddrBits-1).asSInt
val msb =
Mux(a === UInt(0) || a === UInt(1), e =/= SInt(0),
- Mux(a.toSInt === SInt(-1) || a.toSInt === SInt(-2), e === SInt(-1), e(0)))
+ Mux(a.asSInt === SInt(-1) || a.asSInt === SInt(-2), e === SInt(-1), e(0)))
Cat(msb, ea(vaddrBits-1,0))
}

@@ -52,9 +52,9 @@ class TLB(implicit val p: Parameters) extends Module with HasTLBParameters {
val r_refill_waddr = Reg(UInt(width = log2Ceil(entries)))
val r_req = Reg(new TLBReq)

- val lookup_tag = Cat(io.ptw.ptbr.asid, io.req.bits.vpn(vpnBits-1,0)).toUInt
+ val lookup_tag = Cat(io.ptw.ptbr.asid, io.req.bits.vpn(vpnBits-1,0))
val hitsVec = (0 until entries).map(i => valid(i) && tags(i) === lookup_tag)
- val hits = hitsVec.toBits
+ val hits = hitsVec.asUInt

// permission bit arrays
val pte_array = Reg(new PTE)
@@ -41,7 +41,7 @@ object Util {
}
}

- def toBits(): UInt = Cat(x.map(_.toBits).reverse)
+ def asUInt(): UInt = Cat(x.map(_.asUInt).reverse)
}

implicit class UIntIsOneOf(val x: UInt) extends AnyVal {
@@ -167,7 +167,7 @@ object Random
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
- else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).toBits
+ else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)

@@ -8,4 +8,18 @@ package object Util {

def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}

+ implicit class SeqToAugmentedSeq[T <: Data](val x: Seq[T]) extends AnyVal {
+ def apply(idx: UInt): T = {
+ if (x.size == 1) {
+ x.head
+ } else {
+ val half = 1 << (log2Ceil(x.size) - 1)
+ val newIdx = idx & UInt(half - 1)
+ Mux(idx >= UInt(half), x.drop(half)(newIdx), x.take(half)(newIdx))
+ }
+ }

+ def asUInt(): UInt = Cat(x.map(_.asUInt).reverse)
+ }
}
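Note: the SeqToAugmentedSeq implicit class added above is what lets a plain Scala Seq of hardware values be indexed by a UInt and packed with asUInt; the toBits-to-asUInt rewrites on Seq values elsewhere in this commit (for example val ready_bits = outs.map(_.ready).asUInt in the coherence-agent helpers) appear to resolve through it once uncore.Util._ is imported. A rough usage sketch, with signal names assumed for illustration:

    import uncore.Util._

    val readyVec: Seq[Bool] = outs.map(_.ready)
    val ready_bits = readyVec.asUInt   // Cat of the elements, reversed: element 0 lands in bit 0
    val selected   = readyVec(idx)     // Mux tree indexed by a hardware UInt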
@@ -8,6 +8,7 @@ import junctions._
import uncore.tilelink._
import uncore.converters._
import uncore.coherence._
+ import uncore.Util._

case object NReleaseTransactors extends Field[Int]
case object NProbeTransactors extends Field[Int]
@@ -59,7 +60,7 @@ trait HasCoherenceAgentWiringHelpers {
val idx = in.bits.manager_xact_id
outs.map(_.bits := in.bits)
outs.zipWithIndex.map { case (o,i) => o.valid := in.valid && idx === UInt(i) }
- in.ready := Vec(outs.map(_.ready)).read(idx)
+ in.ready := outs.map(_.ready).apply(idx)
}

/** Broadcasts valid messages on this channel to all trackers,
@@ -82,10 +83,10 @@
dataOverrides: Option[Seq[UInt]] = None,
allocOverride: Option[Bool] = None,
matchOverride: Option[Bool] = None) {
- val ready_bits = Vec(outs.map(_.ready)).toBits
- val can_alloc_bits = Vec(allocs.map(_.can)).toBits
+ val ready_bits = outs.map(_.ready).asUInt
+ val can_alloc_bits = allocs.map(_.can).asUInt
val should_alloc_bits = PriorityEncoderOH(can_alloc_bits)
- val match_bits = Vec(allocs.map(_.matches)).toBits
+ val match_bits = allocs.map(_.matches).asUInt
val no_matches = !match_bits.orR
val alloc_ok = allocOverride.getOrElse(Bool(true))
val match_ok = matchOverride.getOrElse(Bool(true))
@@ -151,9 +151,9 @@ class MetadataArray[T <: Metadata](onReset: () => T)(implicit p: Parameters) ext
val rst_cnt = Reg(init=UInt(0, log2Up(nSets+1)))
val rst = rst_cnt < UInt(nSets)
val waddr = Mux(rst, rst_cnt, io.write.bits.idx)
- val wdata = Mux(rst, rstVal, io.write.bits.data).toBits
- val wmask = Mux(rst || Bool(nWays == 1), SInt(-1), io.write.bits.way_en.toSInt).toBools
- val rmask = Mux(rst || Bool(nWays == 1), SInt(-1), io.read.bits.way_en.toSInt).toBools
+ val wdata = Mux(rst, rstVal, io.write.bits.data).asUInt
+ val wmask = Mux(rst || Bool(nWays == 1), SInt(-1), io.write.bits.way_en.asSInt).toBools
+ val rmask = Mux(rst || Bool(nWays == 1), SInt(-1), io.read.bits.way_en.asSInt).toBools
when (rst) { rst_cnt := rst_cnt+UInt(1) }

val metabits = rstVal.getWidth
@@ -161,21 +161,18 @@ class MetadataArray[T <: Metadata](onReset: () => T)(implicit p: Parameters) ext
if (hasSplitMetadata) {
val tag_arrs = List.fill(nWays){ SeqMem(nSets, UInt(width = metabits)) }
val tag_readout = Wire(Vec(nWays,rstVal.cloneType))
- val tags_vec = Wire(Vec(nWays, UInt(width = metabits)))
(0 until nWays).foreach { (i) =>
when (rst || (io.write.valid && wmask(i))) {
tag_arrs(i).write(waddr, wdata)
}
- tags_vec(i) := tag_arrs(i).read(io.read.bits.idx, io.read.valid && rmask(i))
+ io.resp(i) := rstVal.fromBits(tag_arrs(i).read(io.read.bits.idx, io.read.valid && rmask(i)))
}
- io.resp := io.resp.fromBits(tags_vec.toBits)
} else {
val tag_arr = SeqMem(nSets, Vec(nWays, UInt(width = metabits)))
when (rst || io.write.valid) {
tag_arr.write(waddr, Vec.fill(nWays)(wdata), wmask)
}
- val tags = tag_arr.read(io.read.bits.idx, io.read.valid).toBits
- io.resp := io.resp.fromBits(tags)
+ io.resp := tag_arr.read(io.read.bits.idx, io.read.valid).map(rstVal.fromBits(_))
}

io.read.ready := !rst && !io.write.valid // so really this could be a 6T RAM
@@ -329,7 +326,7 @@ class L2MetadataArray(implicit p: Parameters) extends L2HellaCacheModule()(p) {
val meta = Module(new MetadataArray(onReset _))
meta.io.read <> io.read
meta.io.write <> io.write
- val way_en_1h = (Vec.fill(nWays){Bool(true)}).toBits
+ val way_en_1h = UInt((BigInt(1) << nWays) - 1)
val s1_way_en_1h = RegEnable(way_en_1h, io.read.valid)
meta.io.read.bits.way_en := way_en_1h

@@ -338,7 +335,7 @@ class L2MetadataArray(implicit p: Parameters) extends L2HellaCacheModule()(p) {
def wayMap[T <: Data](f: Int => T) = Vec((0 until nWays).map(f))
val s1_clk_en = Reg(next = io.read.fire())
val s1_tag_eq_way = wayMap((w: Int) => meta.io.resp(w).tag === s1_tag)
- val s1_tag_match_way = wayMap((w: Int) => s1_tag_eq_way(w) && meta.io.resp(w).coh.outer.isValid() && s1_way_en_1h(w).toBool).toBits
+ val s1_tag_match_way = wayMap((w: Int) => s1_tag_eq_way(w) && meta.io.resp(w).coh.outer.isValid() && s1_way_en_1h(w).toBool).asUInt
val s1_idx = RegEnable(io.read.bits.idx, io.read.valid) // deal with stalls?
val s2_tag_match_way = RegEnable(s1_tag_match_way, s1_clk_en)
val s2_tag_match = s2_tag_match_way.orR
@@ -445,7 +442,7 @@ class L2DataArray(delay: Int)(implicit p: Parameters) extends L2HellaCacheModule

val r_req = Pipe(io.read.fire(), io.read.bits)
io.resp := Pipe(r_req.valid, r_req.bits, delay)
- io.resp.bits.data := Pipe(r_req.valid, array.read(raddr, ren).toBits, delay).bits
+ io.resp.bits.data := Pipe(r_req.valid, array.read(raddr, ren).asUInt, delay).bits
io.read.ready := !io.write.valid
io.write.ready := Bool(true)
}
@@ -3,6 +3,7 @@
package uncore.agents

import Chisel._
+ import uncore.Util._

abstract class Decoding
{
@@ -63,24 +64,23 @@ class SECCode extends Code
} else
x(mapping(i))
}
- Vec(y).toBits
+ y.asUInt
}
def decode(y: UInt) = new Decoding {
val n = y.getWidth
require(n > 0 && !isPow2(n))

val p2 = for (i <- 0 until log2Up(n)) yield 1 << i
- val syndrome = p2 map { i =>
+ val syndrome = (p2 map { i =>
val r = for (j <- 1 to n; if (j & i) != 0)
yield y(j-1)
r reduce (_^_)
- }
- val s = Vec(syndrome).toBits
+ }).asUInt

- private def swizzle(z: UInt) = Vec((1 to n).filter(i => !isPow2(i)).map(i => z(i-1))).toBits
+ private def swizzle(z: UInt) = (1 to n).filter(i => !isPow2(i)).map(i => z(i-1)).asUInt
def uncorrected = swizzle(y)
- def corrected = swizzle(((y.toUInt << 1) ^ UIntToOH(s)) >> 1)
- def correctable = s.orR
+ def corrected = swizzle(((y << 1) ^ UIntToOH(syndrome)) >> 1)
+ def correctable = syndrome.orR
def uncorrectable = Bool(false)
}
private def mapping(i: Int) = i-1-log2Up(i)
@@ -52,7 +52,7 @@ trait HasStoreDataQueue extends HasStoreDataQueueParameters {
).reduce(_||_)

lazy val sdqLoc = List.fill(nTransactors) {
- DataQueueLocation(sdq_alloc_id, inStoreQueue).toBits
+ DataQueueLocation(sdq_alloc_id, inStoreQueue).asUInt
}

/*
@@ -74,7 +74,7 @@ trait HasStoreDataQueue extends HasStoreDataQueueParameters {
lazy val vwbqLoc = (0 until nTransactors).map(i =>
(DataQueueLocation(rel_data_cnt,
(if(i < nReleaseTransactors) inVolWBQueue
- else inClientReleaseQueue)).toBits))
+ else inClientReleaseQueue)).asUInt))
/*
doInputRoutingWithAllocation(
io.inner.release,
@@ -380,7 +380,7 @@ trait EmitsInnerProbes extends HasBlockAddressBuffer
def full_representation: UInt
def initializeProbes() {
if (needs_probes)
- pending_iprbs := full_representation & ~io.incoherent.toBits
+ pending_iprbs := full_representation & ~io.incoherent.asUInt
else
pending_iprbs := UInt(0)
}
@@ -28,7 +28,7 @@ class NullRepresentation(nClients: Int) extends DirectoryRepresentation(1) {
def one(s: UInt) = Bool(false)
def count(s: UInt) = UInt(nClients)
def next(s: UInt) = UInt(0)
- def full(s: UInt) = SInt(-1, width = nClients).toUInt
+ def full(s: UInt) = SInt(-1, width = nClients).asUInt
}

class FullRepresentation(nClients: Int) extends DirectoryRepresentation(nClients) {
@@ -170,7 +170,7 @@ class NastiIOTileLinkIOConverter(implicit p: Parameters) extends TLModule()(p)
val odd_outside_0 = Seq.tabulate (len/2) { i => all_outside_0(2*i+1) }
val odd_outside = odd_outside_0.reduce (_ || _)
val all_outside = all_outside_0.reduce (_ || _)
- val offset = Cat(sub_offset, odd_outside.toBits)
+ val offset = Cat(sub_offset, odd_outside)
val size = Mux(all_outside, UInt(defsize), sub_size)
(all_outside_0, offset, size)
}
@@ -263,8 +263,8 @@ class TileLinkIOWidener(innerTLId: String, outerTLId: String)
client_xact_id = put_id,
addr_block = put_block,
addr_beat = put_beat,
- data = put_data.toBits,
- wmask = Some(put_wmask.toBits))(outerConfig)
+ data = put_data.asUInt,
+ wmask = Some(put_wmask.asUInt))(outerConfig)

io.out.acquire.valid := sending_put || (!shrink && io.in.acquire.valid)
io.out.acquire.bits := MuxCase(get_block_acquire, Seq(
@@ -505,7 +505,7 @@ class TileLinkIONarrower(innerTLId: String, outerTLId: String)
client_xact_id = gnt_client_id,
manager_xact_id = gnt_manager_id,
addr_beat = ignt_ctr.value,
- data = gnt_data_buffer.toBits)(innerConfig)
+ data = gnt_data_buffer.asUInt)(innerConfig)

val smallget_grant = ognt.g_type === Grant.getDataBeatType

@@ -224,14 +224,11 @@ case class DebugModuleConfig (

val hasHaltSum = (nComponents > 64) || (nSerialPorts > 0)

- val hasDebugRom = debugRomContents match{
- case Some(_) => true
- case None => false
- }
+ val hasDebugRom = debugRomContents.nonEmpty

if (hasDebugRom) {
- require (debugRomContents.size > 0)
- require (debugRomContents.size <= 512)
+ require (debugRomContents.get.size > 0)
+ require (debugRomContents.get.size <= 512)
}

require (nNDResetCycles > 0)
@@ -647,15 +644,15 @@ class DebugModule ()(implicit val p:cde.Parameters)

sbRamRdData := ramRdData

- ramWrMask := Mux(sbRamWrEn, sbWrMask, dbRamWrMask.toBits())
+ ramWrMask := Mux(sbRamWrEn, sbWrMask, dbRamWrMask.asUInt)

assert (!((dbRamWrEn | dbRamRdEn) & (sbRamRdEn | sbRamWrEn)), "Stall logic should have prevented concurrent SB/DB RAM Access")

// Make copies of DB RAM data before writing.
val dbRamWrDataVec = Fill(1 << (dbRamAddrWidth - ramAddrWidth), dbRamWrData)
ramWrData := Mux(sbRamWrEn,
- (ramWrMask & sbRamWrData ) | (~ramWrMask & ramRdData),
- (ramWrMask & dbRamWrDataVec.toBits) | (~ramWrMask & ramRdData))
+ (ramWrMask & sbRamWrData ) | (~ramWrMask & ramRdData),
+ (ramWrMask & dbRamWrDataVec) | (~ramWrMask & ramRdData))

ramAddr := Mux(sbRamWrEn | sbRamRdEn, sbRamAddr,
dbRamAddr >> (dbRamAddrWidth - ramAddrWidth))
@@ -700,7 +697,7 @@ class DebugModule ()(implicit val p:cde.Parameters)
// haltnot handled in other logic
if (cfg.hasBusMaster){
// buserror is set 'until 0 is written to any bit in this field'.
- CONTROLReg.buserror := Mux((CONTROLWrData.buserror === SInt(-1).toBits), CONTROLReg.buserror, UInt(0))
+ CONTROLReg.buserror := Mux(CONTROLWrData.buserror.andR, CONTROLReg.buserror, UInt(0))
CONTROLReg.autoincrement := CONTROLWrData.autoincrement
CONTROLReg.access := CONTROLWrData.access
}
@@ -743,15 +740,15 @@ class DebugModule ()(implicit val p:cde.Parameters)

dbRamRdEn := Bool(false)
when ((dbReq.addr >> 4) === Bits(0)) { // 0x00 - 0x0F Debug RAM
- dbRdData := RAMRdData.toBits()
+ dbRdData := RAMRdData.asUInt
dbRamRdEn := dbRdEn
}.elsewhen (dbReq.addr === DMCONTROL) {
- dbRdData := CONTROLRdData.toBits()
+ dbRdData := CONTROLRdData.asUInt
}.elsewhen (dbReq.addr === DMINFO) {
- dbRdData := DMINFORdData.toBits()
+ dbRdData := DMINFORdData.asUInt
}.elsewhen (dbReq.addr === HALTSUM) {
if (cfg.hasHaltSum){
- dbRdData := HALTSUMRdData.toBits()
+ dbRdData := HALTSUMRdData.asUInt
} else {
dbRdData := UInt(0)
}
@@ -5,6 +5,7 @@ import cde.{Parameters, Field}
import junctions._
import junctions.NastiConstants._
import uncore.tilelink._
+ import uncore.Util._

case object NDmaTransactors extends Field[Int]
case object NDmaXacts extends Field[Int]
@@ -104,7 +105,7 @@ class DmaManager(outstandingCSR: Int)(implicit p: Parameters)
val ctrl_regs = Reg(Vec(nCtrlWords, UInt(width = nastiXDataBits)))
val ctrl_idx = Reg(UInt(width = log2Up(nCtrlWords)))
val ctrl_done = Reg(Bool())
- val ctrl_blob = ctrl_regs.toBits
+ val ctrl_blob = ctrl_regs.asUInt
val ctrl_id = Reg(UInt(width = nastiXIdBits))

val sizeOffset = 3 * addrBits
@@ -228,7 +229,7 @@ class DmaTrackerFile(implicit p: Parameters) extends DmaModule()(p) {
}

val trackers = List.fill(nDmaTransactors) { Module(new DmaTracker) }
- val reqReadys = Vec(trackers.map(_.io.dma.req.ready)).toBits
+ val reqReadys = trackers.map(_.io.dma.req.ready).asUInt

io.mem <> trackers.map(_.io.mem)
io.mmio <> trackers.map(_.io.mmio)
@@ -305,10 +306,10 @@ class DmaTracker(implicit p: Parameters) extends DmaModule()(p)
val (put_beat, put_done) = Counter(
io.mem.acquire.fire() && acq.hasData(), tlDataBeats)

- val put_mask = Vec.tabulate(tlDataBytes) { i =>
+ val put_mask = Seq.tabulate(tlDataBytes) { i =>
val byte_index = Cat(put_beat, UInt(i, tlByteAddrBits))
byte_index >= offset && byte_index < bytes_left
- }.toBits
+ }.asUInt

val prefetch_sent = io.mem.acquire.fire() && io.mem.acquire.bits.isPrefetch()
val prefetch_busy = Reg(init = UInt(0, tlMaxClientXacts))
@@ -324,14 +325,14 @@ class DmaTracker(implicit p: Parameters) extends DmaModule()(p)
(value >> sel)(0)

when (alignment === UInt(0)) {
- put_data := data_buffer.read(base_index)
+ put_data := data_buffer(base_index)
} .elsewhen (shift_dir) {
val shift_index = base_index - beat_align
when (bit_align === UInt(0)) {
- put_data := data_buffer.read(shift_index)
+ put_data := data_buffer(shift_index)
} .otherwise {
- val upper_bits = data_buffer.read(shift_index)
- val lower_bits = data_buffer.read(shift_index - UInt(1))
+ val upper_bits = data_buffer(shift_index)
+ val lower_bits = data_buffer(shift_index - UInt(1))
val upper_shifted = upper_bits << bit_align
val lower_shifted = lower_bits >> rev_align
put_data := upper_shifted | lower_shifted
@@ -339,10 +340,10 @@ class DmaTracker(implicit p: Parameters) extends DmaModule()(p)
} .otherwise {
val shift_index = base_index + beat_align
when (bit_align === UInt(0)) {
- put_data := data_buffer.read(shift_index)
+ put_data := data_buffer(shift_index)
} .otherwise {
- val upper_bits = data_buffer.read(shift_index + UInt(1))
- val lower_bits = data_buffer.read(shift_index)
+ val upper_bits = data_buffer(shift_index + UInt(1))
+ val lower_bits = data_buffer(shift_index)
val upper_shifted = upper_bits << rev_align
val lower_shifted = lower_bits >> bit_align
put_data := upper_shifted | lower_shifted
@@ -502,7 +503,7 @@ class DmaTracker(implicit p: Parameters) extends DmaModule()(p)
val write_half = gnt.client_xact_id(0)
val write_idx = Cat(write_half, gnt.addr_beat)
get_inflight := get_inflight & ~UIntToOH(write_idx)
- data_buffer.write(write_idx, gnt.data)
+ data_buffer(write_idx) := gnt.data
} .otherwise {
put_inflight := Bool(false)
}
@@ -154,7 +154,7 @@ class PLIC(val cfg: PLICConfig)(implicit val p: Parameters) extends Module
val word =
if (tlDataBytes >= pending.size) UInt(0)
else addr(log2Up(pending.size)-1,log2Up(tlDataBytes))
- rdata := pending.toBits >> (word * tlDataBits)
+ rdata := pending.asUInt >> (word * tlDataBits)
}.otherwise {
val regsPerBeat = tlDataBytes >> log2Up(cfg.priorityBytes)
val word =
@@ -144,7 +144,7 @@ trait AppendsArbiterId extends TileLinkArbiterLike {
else
UInt(0)
}
- def arbIdx(in: ManagerSourcedWithId) = in.client_xact_id(log2Up(arbN)-1,0).toUInt
+ def arbIdx(in: ManagerSourcedWithId) = in.client_xact_id(log2Up(arbN)-1,0)
}

/** Uses the client_xact_id as is (assumes it has been set to port index) */
@@ -397,7 +397,7 @@ object Acquire {
Acquire.putPrefetchType -> Cat(M_XWR, alloc)))
}

- def fullWriteMask(implicit p: Parameters) = SInt(-1, width = p(TLKey(p(TLId))).writeMaskBits).toUInt
+ def fullWriteMask(implicit p: Parameters) = SInt(-1, width = p(TLKey(p(TLId))).writeMaskBits).asUInt

// Most generic constructor
def apply(
@@ -36,7 +36,7 @@ class StoreGenAligned(typ: UInt, addr: UInt, dat: UInt, maxSize: Int) extends St

class LoadGen(typ: UInt, addr: UInt, dat: UInt, zero: Bool, maxSize: Int) {
private val t = new StoreGen(typ, addr, dat, maxSize)
- private val signed = typ.toSInt >= SInt(0)
+ private val signed = typ.asSInt >= SInt(0)

private def genData(logMinSize: Int): UInt = {
var res = dat
@@ -82,7 +82,7 @@ class AMOALU(rhsIsAligned: Boolean = false)(implicit p: Parameters) extends Modu
if (operandBits == 32) io.lhs + rhs
else {
val mask = ~UInt(0,64) ^ (io.addr(2) << 31)
- (io.lhs & mask).toUInt + (rhs & mask)
+ (io.lhs & mask) + (rhs & mask)
}

val less =