util: resynchronize AsyncQueue counters when far side resets
If the other clock domain is much faster than ours, its reset might be shorter than a single cycle in our domain. In that case, we need to catch the reset and extend it to at least one full cycle in our domain.
commit 75bb94017b
parent 5e2609bdd2
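
The mechanism, condensed from the hunks below into one illustrative sketch (it reuses this util package's AsyncResetReg and AsyncGrayCounter helpers and is not itself part of the patch): the far side's active-low reset asynchronously clears a flop whose data input is tied high, so even a sub-cycle pulse stays visible until the next local clock edge, and the stretched signal is then run through the usual synchronizer before it gates anything.

    import Chisel._
    import util._  // assumes this repo's AsyncResetReg / AsyncGrayCounter helpers

    class ResetCatcher(sync: Int = 3) extends Module {
      val io = new Bundle {
        val far_reset_n   = Bool(INPUT)   // may pulse for far less than one local cycle
        val local_reset_n = Bool(OUTPUT)  // low for at least one full local cycle
      }
      // the async clear catches the pulse; the flop only returns high at a local clock edge
      val catch_reset_n = AsyncResetReg(Bool(true), clock, !io.far_reset_n)
      // synchronize the stretched reset into the local domain before using it
      io.local_reset_n := AsyncGrayCounter(catch_reset_n.asUInt, sync)(0)
    }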
@@ -4,10 +4,10 @@ package util
 import Chisel._
 
 object GrayCounter {
-  def apply(bits: Int, increment: Bool = Bool(true)): UInt = {
+  def apply(bits: Int, increment: Bool = Bool(true), clear: Bool = Bool(false)): UInt = {
     val incremented = Wire(UInt(width=bits))
-    val binary = AsyncResetReg(incremented, 0)
-    incremented := binary + increment.asUInt()
+    val binary = AsyncResetReg(incremented)
+    incremented := Mux(clear, UInt(0), binary + increment.asUInt())
     incremented ^ (incremented >> UInt(1))
   }
 }
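
The new clear argument exists so that a pointer can be snapped back to zero when the far side is seen in reset; this is safe because the Gray code of zero is zero, so both sides restart from matching pointers. A quick plain-Scala sanity check of the conversion the counter uses (my own illustration, not part of the patch):

    // gray = b ^ (b >> 1): consecutive values differ in exactly one bit,
    // and clearing the binary counter to 0 also clears the Gray output
    def toGray(b: Int): Int = b ^ (b >> 1)
    assert((0 until 8).map(toGray) == Seq(0, 1, 3, 2, 6, 7, 5, 4))
    assert(toGray(0) == 0)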
@@ -38,18 +38,23 @@ class AsyncQueueSource[T <: Data](gen: T, depth: Int, sync: Int) extends Module
     val sink_reset_n = Bool().flip
   }
 
+  // extend the sink reset to a full cycle (assertion latency <= 1 cycle)
+  val catch_reset_n = AsyncResetReg(Bool(true), clock, !io.sink_reset_n)
+  // reset_n has a 1 cycle shorter path to ready than ridx does
+  val reset_n = AsyncGrayCounter(catch_reset_n.asUInt, sync)(0)
+
   val mem = Reg(Vec(depth, gen)) //This does NOT need to be asynchronously reset.
-  val widx = GrayCounter(bits+1, io.enq.fire())
+  val widx = GrayCounter(bits+1, io.enq.fire(), !reset_n)
   val ridx = AsyncGrayCounter(io.ridx, sync)
   val ready = widx =/= (ridx ^ UInt(depth | depth >> 1))
 
   val index = if (depth == 1) UInt(0) else io.widx(bits-1, 0) ^ (io.widx(bits, bits) << (bits-1))
   when (io.enq.fire()) { mem(index) := io.enq.bits }
 
-  val ready_reg = AsyncResetReg(ready, 0)
-  io.enq.ready := ready_reg
+  val ready_reg = AsyncResetReg(ready.asUInt)(0)
+  io.enq.ready := ready_reg && reset_n
 
-  val widx_reg = AsyncResetReg(widx, 0)
+  val widx_reg = AsyncResetReg(widx)
   io.widx := widx_reg
 
   io.mem := mem
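
Two details in this hunk are easy to miss. First, ready compares the local write pointer against the synchronized read pointer with its top two bits inverted (ridx ^ (depth | depth >> 1)), which is the Gray-code test for "exactly depth entries apart", i.e. full. Second, io.enq.ready is additionally gated with reset_n so ready drops as soon as the far-side reset is observed, rather than one cycle later through ready_reg (the "1 cycle shorter path" the comment refers to). A plain-Scala check of the full test, illustrative only, assuming depth = 8 so pointers are 4 bits wide:

    def toGray(b: Int): Int = b ^ (b >> 1)
    val depth = 8                              // bits = 3, pointers are bits+1 = 4 wide
    val mask  = depth | depth >> 1             // 0b1100: flips the top two Gray bits
    for (r <- 0 until 2 * depth) {
      val w = (r + depth) % (2 * depth)        // write pointer exactly `depth` ahead => full
      assert(toGray(w) == (toGray(r) ^ mask))  // the `ready` comparison sees them as equal
    }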
@@ -68,7 +73,12 @@ class AsyncQueueSink[T <: Data](gen: T, depth: Int, sync: Int) extends Module {
     val source_reset_n = Bool().flip
   }
 
-  val ridx = GrayCounter(bits+1, io.deq.fire())
+  // extend the source reset to a full cycle (assertion latency <= 1 cycle)
+  val catch_reset_n = AsyncResetReg(Bool(true), clock, !io.source_reset_n)
+  // reset_n has a 1 cycle shorter path to valid than widx does
+  val reset_n = AsyncGrayCounter(catch_reset_n.asUInt, sync)(0)
+
+  val ridx = GrayCounter(bits+1, io.deq.fire(), !reset_n)
   val widx = AsyncGrayCounter(io.widx, sync)
   val valid = ridx =/= widx
 
@@ -79,11 +89,13 @@ class AsyncQueueSink[T <: Data](gen: T, depth: Int, sync: Int) extends Module {
   val index = if (depth == 1) UInt(0) else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
   // This register does not NEED to be reset, as its contents will not
   // be considered unless the asynchronously reset deq valid register is set.
   io.deq.bits := RegEnable(io.mem(index), valid)
 
-  io.deq.valid := AsyncResetReg(valid, 0)
-
-  io.ridx := AsyncResetReg(ridx, 0)
+  val valid_reg = AsyncResetReg(valid.asUInt)(0)
+  io.deq.valid := valid_reg && reset_n
+
+  val ridx_reg = AsyncResetReg(ridx)
+  io.ridx := ridx_reg
 }
 
 class AsyncQueue[T <: Data](gen: T, depth: Int = 8, sync: Int = 3) extends Crossing[T] {