author     H. Peter Anvin <hpa@zytor.com>    2016-11-01 04:08:15 (GMT)
committer  H. Peter Anvin <hpa@zytor.com>    2016-11-01 04:08:15 (GMT)
commit     b092a9e69fabfaf7303330655a4948382d528d62 (patch)
tree       48cbd508c4debe5a9fbf98cc832c990c86522da9
parent     d05b69e8086bdd02a851d8397edb43d002639d23 (diff)
SRAM: implement the new 200 MHz shared state machine
Implement the new SRAM state machine, complete with dual access per cpu_clk cycle.

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
-rw-r--r--  abc80.v | 167
1 file changed, 116 insertions(+), 51 deletions(-)
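
Before the diff itself, a minimal stand-alone sketch of how the patch maps an 8-bit CPU access onto the 16-bit SRAM data bus. The wrapper module name (sram_byte_lane_sketch) is invented for illustration and does not appear in the commit, but the sram_be_n, sram_dq and sram_q expressions mirror the ones in the hunk below: the low address bit selects the byte lane via the active-low byte enables, write data is duplicated onto both lanes, and reads mux back the addressed lane.

// Illustrative sketch only; the module name is hypothetical.
module sram_byte_lane_sketch (
    input  wire [18:0] sram_addr,  // byte address into the SRAM
    input  wire [7:0]  cpu_do,     // CPU write data
    input  wire        sram_we_q,  // registered write strobe
    inout  wire [15:0] sram_dq,    // 16-bit SRAM data bus
    output wire [17:0] sram_a,     // SRAM word address
    output wire [1:0]  sram_be_n,  // active-low byte enables
    output wire [7:0]  sram_q      // selected read byte
);
    assign sram_a    = sram_addr[18:1];
    // Odd byte -> enable the high lane; even byte -> enable the low lane.
    assign sram_be_n = ~{sram_addr[0], ~sram_addr[0]};
    // Drive both lanes on writes; the byte enables do the selection.
    assign sram_dq   = sram_we_q ? {cpu_do, cpu_do} : 16'bz;
    // On reads, pick the lane addressed by the low address bit.
    assign sram_q    = sram_addr[0] ? sram_dq[15:8] : sram_dq[7:0];
endmodule
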
diff --git a/abc80.v b/abc80.v
index 253fc24..2896498 100644
--- a/abc80.v
+++ b/abc80.v
@@ -265,7 +265,7 @@ module abc80 (
reg [5:0] fgxaddr; // Byte address horizontally
reg [8:0] fgyaddr; // Byte address vertically x 2 (see below)
- reg [7:0] sram_fgdata;
+ wire [7:0] sram_fgdata;
reg sram_fgack;
wire fgfull;
wire [4:0] fgwrusedw; // FIFO fill status
@@ -420,114 +420,179 @@ module abc80 (
// External SRAM
//
// The SRAM is very fast (10 ns) but is a asychronous part, so we
- // generate the SRAM cycles from the fast_clk to make sure things happen
- // in the right order. The SRAM is only active when actually addressed
- // by the CPU so it can be multiplexed with other functions.
+ // generate the SRAM cycles from a 200 MHz sram_clk to make sure
+ // things happen in the right order. The SRAM is only active when
+ // actually addressed by the CPU so it can be multiplexed with
+ // other functions.
//
- // Events per 100 MHz cycle, numbers correspond to fast_clk_phase:
+ // Events per 25 MHz CPU cycle, numbers correspond to sram_clk_phase:
//
- // CPU read:
- // 0. MMU address is output and CS# and OE# asserted
+ // CPU or fgfifo read:
+ // 0. MMU/fgfifo address is output and CE# and OE# asserted
// 1. Nop
- // 2. Latch CPU data, output mpled driver address, CS# and OE# asserted
- // 3. Nop
- // 0. Latch mpled data
+ // 2. Nop
+ // 3. Latch CPU/fgfifo data
+ // 4. Output npled driver address, CE# and OE# asserted
+ // 5. Nop
+ // 6. Nop
+ // 7. Latch npled data
//
// CPU write:
- // 0. MMU address and data are output, CS# and WE# asserted
- // 1. WE# is deasserted
- // 2. Output mpled driver address, CS# and OE# asserted
- // 3. Nop
- // 0. Latch mpled data
+ // 0. MMU address and data are output, CE# and WE# asserted
+ // 1. Nop
+ // 2. Nop
+ // 3. WE# deasserted
+ // 4. Output npled driver address, CE# and OE# asserted
+ // 5. Nop
+ // 6. Nop
+ // 7. Latch npled data
//
- // No CPU cycle:
- // 0. Deassert CS#, OE#, WE#
+ // No CPU/fgfifo cycle:
+ // 0. Deassert CE#, OE#, WE#
// 1. Nop
- // 2. Output mpled driver address, CS# and OE# asserted
+ // 2. Nop
// 3. Nop
- // 0. Latch mpled data
+ // 4. Output npled driver address, CE# and OE# asserted
+ // 5. Nop
+ // 6. Nop
+ // 7. Latch npled data
//
// ------------------------------------------------------------------------
- // This logic provides a counter for where sram_clk is with respect to
+ // This logic provides a state machine for where sram_clk is with respect to
// cpu_clk. sram_clk = 0 corresponds to the rising edge of cpu_clk.
- (* noprune *) reg [2:0] sram_clk_phase = 3'd0;
+ reg [2:0] sram_clk_phase = 3'd0;
reg [2:0] sram_clk_next_phase = 3'd1;
- reg last_cpu_clk = 1'b0;
+ reg last_cpu_clk = 1'b0;
+ // Sample cpu_clk on the *negative* edge of sram_clk, to avoid the obvious
+ // race condition of the two clocks transitioning at the same time.
always @(negedge sram_clk)
begin
last_cpu_clk <= cpu_clk;
if (cpu_clk & ~last_cpu_clk)
- sram_clk_next_phase <= 3'b1;
+ sram_clk_next_phase <= 3'd1;
else
- sram_clk_next_phase <= sram_clk_next_phase + 3'b1;
+ sram_clk_next_phase <= sram_clk_next_phase + 1'b1;
end
always @(posedge sram_clk)
sram_clk_phase <= sram_clk_next_phase;
- wire [7:0] sram_do; // Data out from sram
- wire sram_oe_w;
- wire sram_we_w;
- reg sram_we_q1;
- reg sram_we_q2;
+ wire [7:0] sram_q; // Data out from sram
+ reg [7:0] sram_do; // Latched output data from sram
+ reg sram_fg_q; // sram_do is for the fg unit
+ reg sram_ce_q;
+ reg sram_oe_q;
+ reg sram_we_q;
reg sram_fgrd;
+ reg [18:0] sram_addr;
// Are we actually accessed by the CPU?
wire sram_cpu = msel[0] & cpu_clk_en;
- assign sram_oe_w = (sram_cpu & ~cpu_rd_n) | sram_fgrd;
- assign sram_we_w = sram_cpu & ~cpu_wr_n;
+ wire [18:0] npled_addr = 19'bx; // Just bullshit for now
+ reg [7:0] npled_do;
- always @(negedge rst_n or posedge fast_clk)
+ always @(negedge rst_n or posedge sram_clk)
if ( ~rst_n )
begin
- sram_we_q1 <= 0;
- sram_we_q2 <= 0;
+ sram_ce_q <= 1'b0;
+ sram_oe_q <= 1'b0;
+ sram_we_q <= 1'b0;
+ sram_fg_q <= 1'b0;
+ sram_addr <= 19'bx;
+ sram_do <= 8'bx;
+ npled_do <= 8'bx;
end // if ( ~rst_n )
else
begin
- // Delayed issues of WE# - to make sure the SRAM
- // doesn't latch data late
- sram_we_q1 <= sram_we_w;
- sram_we_q2 <= sram_we_q1;
+ case (sram_clk_phase)
+ 3'd0:
+ begin
+ npled_do <= sram_q;
+ sram_do <= 8'bx;
+
+ if (sram_cpu)
+ begin
+ sram_ce_q <= 1'b1;
+ sram_we_q <= ~cpu_wr_n;
+ sram_oe_q <= ~cpu_rd_n;
+ sram_addr <= mmu_a[18:0];
+ sram_fg_q <= 1'b0;
+ end
+ else if (sram_fgrd)
+ begin
+ sram_ce_q <= 1'b1;
+ sram_we_q <= 1'b0;
+ sram_oe_q <= 1'b1;
+ sram_addr <= sram_fgaddr;
+ sram_fg_q <= 1'b1;
+ end
+ else
+ begin
+ sram_ce_q <= 1'b0;
+ sram_we_q <= 1'b0;
+ sram_oe_q <= 1'b0;
+ sram_addr <= 19'bx;
+ sram_fg_q <= 1'b0;
+ end // else: !if(sram_fgrd)
+ end
+
+ 3'd3:
+ begin
+ sram_we_q <= 1'b0;
+ end
+
+ 3'd4:
+ begin
+ sram_do <= sram_q;
+ npled_do <= 8'bx;
+
+ sram_ce_q <= 1'b1;
+ sram_we_q <= 1'b0;
+ sram_oe_q <= 1'b1;
+ sram_addr <= npled_addr;
+ end
+
+ default:
+ begin
+ // Do nothing
+ end
+ endcase // case (sram_clk_phase)
end // else: !if( ~rst_n )
- // The address to drive onto the bus.
- // Multiplex the CPU with the Fine Graphics unit.
- wire [18:0] sram_addr = sram_cpu ? mmu_a[18:0] : sram_fgaddr;
-
// Driving output pins.
assign sram_a = sram_addr[18:1];
- assign sram_be_n = sram_addr[0] ? ~2'b10 : ~2'b01;
+ assign sram_be_n = ~{sram_addr[0], ~sram_addr[0]};
- assign sram_ce_n = ~(sram_cpu | sram_fgrd);
- assign sram_oe_n = ~sram_oe_w;
- assign sram_we_n = ~(sram_we_w & ~sram_we_q2);
+ assign sram_ce_n = ~sram_ce_q;
+ assign sram_oe_n = ~sram_oe_q;
+ assign sram_we_n = ~sram_we_q;
- assign sram_dq = sram_we_w ? { cpu_do, cpu_do } : 16'bz;
+ assign sram_dq = sram_we_q ? { cpu_do, cpu_do } : 16'bz;
// SRAM Input side MUX
- assign sram_do = sram_addr[0] ? sram_dq[15:8] : sram_dq[7:0];
+ assign sram_q = sram_addr[0] ? sram_dq[15:8] : sram_dq[7:0];
+
+ // fg unit FIFO handshake
+ assign sram_fgdata = sram_do;
always @(negedge rst_n or posedge cpu_clk)
if ( ~rst_n )
begin
sram_fgrd <= 1'b0;
sram_fgack <= 1'b0;
- sram_fgdata <= 8'hxx;
end
else
begin
sram_fgack <= 1'b0; // Only asserted for one clock
sram_fgrd <= sram_fgreq;
- if (sram_fgrd & ~sram_cpu)
+ if (sram_fg_q)
begin
sram_fgrd <= 1'b0;
- sram_fgdata <= sram_do;
sram_fgack <= 1'b1;
end
end // else: !if( ~rst_n )
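
Finally, a minimal stand-alone sketch of the clock-phase tracking the new state machine is built on. The module and port names (sram_phase_sketch, npled_half) are invented for this example and the asynchronous reset is added only to keep it self-contained; the technique itself is the one in the patch: cpu_clk is sampled on the falling edge of the 200 MHz sram_clk to avoid the race between the two clock edges, the 3-bit counter resynchronizes to 1 on each cpu_clk rising edge, and the registered phase yields eight sram_clk phases per 25 MHz CPU cycle, with phases 0-3 serving the CPU/fgfifo access and phases 4-7 the npled access.

// Illustrative sketch only, not part of the commit.
module sram_phase_sketch (
    input  wire       rst_n,      // asynchronous reset, active low (added for the example)
    input  wire       sram_clk,   // 200 MHz SRAM state machine clock
    input  wire       cpu_clk,    // 25 MHz CPU clock
    output reg  [2:0] phase,      // current phase; 0 follows the cpu_clk rising edge
    output wire       npled_half  // high during phases 4-7 (npled access slot)
);
    reg [2:0] next_phase   = 3'd1;
    reg       last_cpu_clk = 1'b0;

    // Sample cpu_clk away from the rising edge of sram_clk so the two
    // clocks never transition at the same instant.
    always @(negedge sram_clk or negedge rst_n)
      if (~rst_n)
        begin
           last_cpu_clk <= 1'b0;
           next_phase   <= 3'd1;
        end
      else
        begin
           last_cpu_clk <= cpu_clk;
           if (cpu_clk & ~last_cpu_clk)
             next_phase <= 3'd1;              // resynchronize to the CPU clock
           else
             next_phase <= next_phase + 1'b1;
        end

    // The new phase value takes effect on the following rising edge,
    // giving eight sram_clk phases per cpu_clk cycle.
    always @(posedge sram_clk or negedge rst_n)
      if (~rst_n)
        phase <= 3'd0;
      else
        phase <= next_phase;

    // Phases 0-3 serve the CPU/fgfifo access, phases 4-7 the npled access.
    assign npled_half = phase[2];
endmodule
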