From: Gerhard Sittig
Date: Sat, 7 Nov 2020 12:49:06 +0000 (+0100)
Subject: parallel: rephrase word accumulation after reset introduction
X-Git-Url: https://sigrok.org/gitaction?a=commitdiff_plain;h=4f4e035397fa0770d47bf8b663e87651e0709511;p=libsigrokdecode.git

parallel: rephrase word accumulation after reset introduction

Straighten the accumulation of words from bit chunks that are spread
across several bus cycles (multiplexed transmission). Simplify the PD's
instance variables, keep more state in local variables, and explicitly
pass related information to API calls. This also makes the emission of
annotations easier to follow and simplifies future maintenance.

Split the accumulation of word data from the emission of its annotation,
so that reset related activity can flush accumulated data. Introduce a
warning for emitted word data which does not match the configured word
width (early de-assertion of select/enable, or an unexpected reset).
Presenting this data and amending it with a warning is considered more
desirable than not seeing the data at all. This does not affect previous
use cases, since support for the reset signal was only recently
introduced.

Also emit annotations in a more logical order. It is unexpected to see
the resulting word before its last item has been seen. Graphical
presentation may not care, but automated processing of the decoder
output will. This is the previous, surprising order of annotation
emission which gets fixed in this commit:

3768240-4118229 parallel: item: "3"
3768240-4468218 parallel: word: "33"
4118229-4468218 parallel: item: "3"
4468218-4818202 parallel: item: "3"
4468218-5268189 parallel: word: "32"
4818202-5268189 parallel: item: "2"
5268189-5368185 parallel: item: "2"
5268189-5568180 parallel: word: "28"
5368185-5568180 parallel: item: "8"
5568180-5668176 parallel: item: "0"
5568180-5868171 parallel: word: "08"
5668176-5868171 parallel: item: "8"
5868171-5968166 parallel: item: "0"
5868171-6168162 parallel: word: "01"
5968166-6168162 parallel: item: "1"
6168162-6268157 parallel: item: "0"
6168162-6468152 parallel: word: "0c"
6268157-6468152 parallel: item: "c"

The adjusted emission order does not pass the current test
implementation, but manual inspection of the output reveals that all the
expected data is present and matches the previously extracted
information:

parallel/hd44780_word_demux/annotation ..................................... Output mismatch
Testcase: parallel/hd44780_word_demux/annotation
Test output mismatch:
  + 4118229-4468218 parallel: item: "3"
  - 4118229-4468218 parallel: item: "3"
  + 4818202-5268189 parallel: item: "2"
  - 4818202-5268189 parallel: item: "2"
  + 5368185-5568180 parallel: item: "8"
  - 5368185-5568180 parallel: item: "8"
  + 5668176-5868171 parallel: item: "8"
  - 5668176-5868171 parallel: item: "8"
  + 5968166-6168162 parallel: item: "1"
  - 5968166-6168162 parallel: item: "1"
  + 6268157-6468152 parallel: item: "c"
  - 6268157-6468152 parallel: item: "c"
---

diff --git a/decoders/parallel/pd.py b/decoders/parallel/pd.py
index e0623e7..b4dbdb3 100644
--- a/decoders/parallel/pd.py
+++ b/decoders/parallel/pd.py
@@ -65,7 +65,7 @@ class Pin:
     RESET = DATA_N
 
 class Ann:
-    ITEM, WORD = range(2)
+    ITEM, WORD, WARN = range(3)
 
 class ChannelError(Exception):
     pass
@@ -101,84 +101,91 @@ class Decoder(srd.Decoder):
     annotations = (
         ('item', 'Item'),
         ('word', 'Word'),
+        ('warning', 'Warning'),
     )
     annotation_rows = (
         ('items', 'Items', (Ann.ITEM,)),
         ('words', 'Words', (Ann.WORD,)),
+        ('warnings', 'Warnings', (Ann.WARN,)),
     )
 
     def __init__(self):
         self.reset()
 
     def reset(self):
-        self.items = []
-        self.saved_item = None
-        self.ss_item = self.es_item = None
-        self.saved_word = None
-        self.ss_word = self.es_word = None
-        self.first = True
+        self.pend_item = None
+        self.word_items = []
 
     def start(self):
         self.out_python = self.register(srd.OUTPUT_PYTHON)
         self.out_ann = self.register(srd.OUTPUT_ANN)
 
-    def putpb(self, data):
-        self.put(self.ss_item, self.es_item, self.out_python, data)
-
-    def putb(self, data):
-        self.put(self.ss_item, self.es_item, self.out_ann, data)
-
-    def putpw(self, data):
-        self.put(self.ss_word, self.es_word, self.out_python, data)
-
-    def putw(self, data):
-        self.put(self.ss_word, self.es_word, self.out_ann, data)
-
-    def handle_bits(self, item, used_pins):
-
-        # If a word was previously accumulated, then emit its annotation
-        # now after its end samplenumber became available.
-        if self.saved_word is not None:
-            if self.options['wordsize'] > 0:
-                self.es_word = self.samplenum
-                self.putw([Ann.WORD, [self.fmt_word.format(self.saved_word)]])
-                self.putpw(['WORD', self.saved_word])
-            self.saved_word = None
-
-        # Defer annotations for individual items until the next sample
-        # is taken, and the previous sample's end samplenumber has
-        # become available.
-        if self.first:
-            # Save the start sample and item for later (no output yet).
-            self.ss_item = self.samplenum
-            self.first = False
-            self.saved_item = item
-        elif self.saved_item is None:
-            pass
-        else:
-            # Output the saved item (from the last CLK edge to the current).
-            self.es_item = self.samplenum
-            self.putpb(['ITEM', self.saved_item])
-            self.putb([Ann.ITEM, [self.fmt_item.format(self.saved_item)]])
-            self.ss_item = self.samplenum
-            self.saved_item = item
-
-        # Get as many items as the configured wordsize specifies.
-        if not self.items:
-            self.ss_word = self.samplenum
-        self.items.append(item)
-        ws = self.options['wordsize']
-        if len(self.items) < ws:
+    def putg(self, ss, es, ann, txts):
+        self.put(ss, es, self.out_ann, [ann, txts])
+
+    def putpy(self, ss, es, ann, data):
+        self.put(ss, es, self.out_python, [ann, data])
+
+    def flush_word(self, bus_width):
+        if not self.word_items:
+            return
+        word_size = self.options['wordsize']
+
+        items = self.word_items
+        ss, es = items[0][0], items[-1][1]
+        items = [i[2] for i in items]
+        if self.options['endianness'] == 'big':
+            items.reverse()
+        word = sum([d << (i * bus_width) for i, d in enumerate(items)])
+
+        txts = [self.fmt_word.format(word)]
+        self.putg(ss, es, Ann.WORD, txts)
+        self.putpy(ss, es, 'WORD', word)
+        # self.putpy(ss, es, 'WORD', (word, bus_width, word_size))
+
+        if len(items) != word_size:
+            txts = ['incomplete word size', 'word size', 'ws']
+            self.putg(ss, es, Ann.WARN, txts)
+
+        self.word_items.clear()
+
+    def queue_word(self, now, item, bus_width):
+        wordsize = self.options['wordsize']
+        if not wordsize:
             return
 
-        # Collect words and prepare annotation details, but defer emission
-        # until the end samplenumber becomes available.
-        endian = self.options['endianness']
-        if endian == 'big':
-            self.items.reverse()
-        word = sum([self.items[i] << (i * used_pins) for i in range(ws)])
-        self.saved_word = word
-        self.items = []
+        # Terminate a previously seen item of a word first. Emit the
+        # word's annotation when the last item's end was seen.
+        if self.word_items:
+            ss, _, data = self.word_items[-1]
+            es = now
+            self.word_items[-1] = (ss, es, data)
+            if len(self.word_items) == wordsize:
+                self.flush_word(bus_width)
+
+        # Start tracking the currently seen item (yet unknown end time).
+        if item is not None:
+            pend = (now, None, item)
+            self.word_items.append(pend)
+
+    def handle_bits(self, now, item, bus_width):
+
+        # Optionally flush a previously started item.
+        if self.pend_item:
+            ss, _, data = self.pend_item
+            self.pend_item = None
+            es = now
+            txts = [self.fmt_item.format(data)]
+            self.putg(ss, es, Ann.ITEM, txts)
+            self.putpy(ss, es, 'ITEM', data)
+            # self.putpy(ss, es, 'ITEM', (data, bus_width))
+
+        # Optionally queue the currently seen item.
+        if item is not None:
+            self.pend_item = (now, None, item)
+
+        # Pass the current item to the word accumulation logic.
+        self.queue_word(now, item, bus_width)
 
     def decode(self):
         # Determine which (optional) channels have input data. Insist in
@@ -252,10 +259,8 @@ class Decoder(srd.Decoder):
             if reset_edge:
                 in_reset = pins[Pin.RESET] == reset_active
                 if in_reset:
-                    self.handle_bits(None, num_item_bits)
-                    self.saved_item = None
-                    self.saved_word = None
-                    self.first = True
+                    self.handle_bits(self.samplenum, None, num_item_bits)
+                    self.flush_word(num_item_bits)
             if in_reset:
                 continue
 
@@ -263,4 +268,4 @@ class Decoder(srd.Decoder):
             data_bits = [0 if idx is None else pins[idx] for idx in data_indices]
             data_bits = data_bits[:num_item_bits]
             item = bitpack(data_bits)
-            self.handle_bits(item, num_item_bits)
+            self.handle_bits(self.samplenum, item, num_item_bits)
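
For illustration, the accumulation scheme introduced by the diff above can
be modelled outside of the libsigrokdecode srd API. The following minimal
standalone sketch is an assumption-laden simplification, not decoder code:
the WordAccumulator class, its emit() helper, and handle_item() are
illustrative names only, and item handling and word queuing are merged into
one method. It mimics how items are tracked as (ss, es, data) tuples, how
the word annotation is only emitted once the last item's end sample is
known, and how flushing after an early termination yields an incomplete
word plus a warning.

# Minimal standalone sketch (assumption: plain Python, no srd dependency;
# names are illustrative and not part of the parallel decoder's API).
class WordAccumulator:
    def __init__(self, wordsize, bus_width, endianness='little'):
        self.wordsize = wordsize
        self.bus_width = bus_width
        self.endianness = endianness
        self.word_items = []   # accumulated (ss, es, item) tuples
        self.annotations = []  # collected (ss, es, kind, text) output

    def emit(self, ss, es, kind, text):
        self.annotations.append((ss, es, kind, text))

    def flush_word(self):
        # Emit the accumulated word (if any); warn when it is incomplete.
        if not self.word_items:
            return
        ss, es = self.word_items[0][0], self.word_items[-1][1]
        items = [i[2] for i in self.word_items]
        if self.endianness == 'big':
            items.reverse()
        word = sum(d << (i * self.bus_width) for i, d in enumerate(items))
        self.emit(ss, es, 'word', '{:x}'.format(word))
        if len(items) != self.wordsize:
            self.emit(ss, es, 'warning', 'incomplete word')
        self.word_items.clear()

    def handle_item(self, now, item):
        # Close the previously pending item first (its end sample only
        # becomes known now), then flush the word once 'wordsize' items
        # have completed. Only afterwards start tracking the current item,
        # so "item" annotations always precede the "word" annotation.
        if self.word_items and self.word_items[-1][1] is None:
            ss, _, data = self.word_items[-1]
            self.word_items[-1] = (ss, now, data)
            self.emit(ss, now, 'item', '{:x}'.format(data))
            if len(self.word_items) == self.wordsize:
                self.flush_word()
        if item is not None:
            self.word_items.append((now, None, item))


if __name__ == '__main__':
    # Two 4-bit items per word, little endian: 0x3 then 0x3 gives 0x33.
    acc = WordAccumulator(wordsize=2, bus_width=4)
    for samplenum, nibble in [(100, 0x3), (200, 0x3), (300, 0x2)]:
        acc.handle_item(samplenum, nibble)
    # Simulate a reset edge at sample 400: terminate the pending item,
    # then flush what was accumulated (an incomplete single-item word).
    acc.handle_item(400, None)
    acc.flush_word()
    for ann in acc.annotations:
        print(ann)

Running the sketch prints both item annotations before their word
annotation, which corresponds to the adjusted emission order discussed in
the commit message, and the final flush yields a short word together with
a warning annotation.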