!cpu 6510

CONFIG_ZP_ADDR	= $f0
LZ_BITS_LEFT	= 0
INPLACE		= 0
SETUP_LZ_DST	= 0

lz_bits		= CONFIG_ZP_ADDR + 0
lz_dst		= CONFIG_ZP_ADDR + 1
lz_src		= CONFIG_ZP_ADDR + 3
lz_len_hi	= CONFIG_ZP_ADDR + 5

;fetch the next packed-stream bit into the carry flag
!macro get_lz_bit {
	!if LZ_BITS_LEFT = 1 {
		asl lz_bits		;XXX TODO
	} else {
		lsr lz_bits
	}
}

.lz_end_check
.lz_start_over
	lda #$01			;we fall through this check on entry and start with a literal
	+get_lz_bit
	bcs .lz_match			;after each match, check for another match or a literal?

					;------------------
					;LITERAL
					;------------------
.lz_literal
	+get_lz_bit
	bcs +
-
	+get_lz_bit			;fetch payload bit
	rol				;can also be moved to the front and executed once on start
	+get_lz_bit
	bcc -
+
	bne +
	jsr .lz_refill_bits
+
	tax
.lz_l_page_
.lz_cp_lit
	lda (lz_src),y			;/!\ need to copy this way, or we run the risk of copying from an area that is still blocked by the barrier; this totally sucks, loading in order reveals that
	sta (lz_dst),y

	inc lz_src + 0			;advance pointers and loop over the literal run
	bne +
	inc lz_src + 1
+
	inc lz_dst + 0
	bne +
	inc lz_dst + 1
+
	dex
	bne .lz_cp_lit

	lda #$01			;A = 1 with rol, but not if we copy the literal this way
	+get_lz_bit
	bcs .lz_match			;either a match with a new offset or with the old offset

					;------------------
					;REPEAT LAST OFFSET
					;------------------
.lz_repeat
	+get_lz_bit			;cheaper with 2 branches, as the initial branch to .lz_literal is thereby removed
	bcs +
-
	+get_lz_bit			;fetch payload bit
	rol				;can also be moved to the front and executed once on start
	+get_lz_bit			;cheaper with 2 branches, as the initial branch to .lz_literal is thereby removed
	bcc -
+
	bne +
	jsr .lz_refill_bits		;fetch more bits
	beq .lz_m_page			;avoid underflow of A on sbc #$01; faster than forcing carry to 1 with a sec every time
+
	sbc #$01			;subtract 1, will be added again on adc as C = 1
.lz_match_big				;we enter here with length - 1 from a normal match
	eor #$ff
	tay
.lz_m_page_
	eor #$ff			;restore A
.lz_match_len2				;entry from new_offset handling; the first length bit to be adc'ed is in carry, and we have %0xxxxxxx xxxxxxxx as offset
	sta .lz_offset_lo + 1
	inc
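
;----------------------------------------------------------------------------
; The routine .lz_refill_bits called above is not part of this excerpt. As a
; rough orientation only: one common scheme keeps a marker bit in lz_bits, so
; the Z flag of the shift signals when the register has run empty and the
; next byte must be fetched from the stream. The sketch below illustrates
; that scheme for the LZ_BITS_LEFT = 0 (lsr) variant; it is NOT the original
; routine. The label .sketch_get_bit and the assumption that Y = 0 on entry
; are illustrative only.
;----------------------------------------------------------------------------
.sketch_get_bit
	lsr lz_bits			;C = next payload bit, Z = register ran empty
	bne +				;marker bit still inside -> C is a valid payload bit
	lda (lz_src),y			;register empty: C currently holds the marker bit (1)
	inc lz_src + 0			;advance the bit stream pointer
	bne ++
	inc lz_src + 1
++
	ror				;marker re-enters at bit 7, bit 0 drops into C as payload
	sta lz_bits			;register now holds 7 payload bits plus the marker again
+
	rts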