-# Doxyfile 1.8.6
+# Doxyfile 1.8.13
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
PROJECT_BRIEF = "sigrok protocol decoding library"
-# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
-# the documentation. The maximum height of the logo should not exceed 55 pixels
-# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
-# to the output directory.
+# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
+# in the documentation. The maximum height of the logo should not exceed 55
+# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
+# the logo to the output directory.
PROJECT_LOGO = contrib/sigrok-logo-notext.png
OUTPUT_DIRECTORY = doxy
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
# directories (in 2 levels) under the output directory of each output format and
# will distribute the generated files over these directories. Enabling this
# option can be useful when feeding doxygen a huge amount of source files, where
# performance problems for the file system.
# The default value is: NO.
-CREATE_SUBDIRS = YES
+CREATE_SUBDIRS = NO
+
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES = NO
# The OUTPUT_LANGUAGE tag is used to specify the language in which all
# documentation generated by doxygen is written. Doxygen will use this
OUTPUT_LANGUAGE = English
-# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
+# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
# descriptions after the members that are listed in the file and class
# documentation (similar to Javadoc). Set to NO to disable this.
# The default value is: YES.
BRIEF_MEMBER_DESC = YES
-# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
+# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
# description of a member or function before the detailed description
#
# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
INLINE_INHERITED_MEMB = NO
-# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
+# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
# before files name in the file list and in the header files. If set to NO the
# shortest path that makes the file name unique will be used
# The default value is: YES.
INHERIT_DOCS = YES
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
-# new page for each member. If set to NO, the documentation of a member will be
-# part of the file/class/namespace that contains it.
+# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
+# page for each member. If set to NO, the documentation of a member will be part
+# of the file/class/namespace that contains it.
# The default value is: NO.
SEPARATE_MEMBER_PAGES = NO
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension, and
# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
-# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make
-# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
-# (default is Fortran), use: inc=Fortran f=C.
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
+# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
+# Fortran. In the latter case the parser tries to guess whether the code is fixed
+# or free formatted code, this is the default for Fortran type files), VHDL. For
+# instance to make doxygen treat .inc files as Fortran files (default is PHP),
+# and .f files as C (default is Fortran), use: inc=Fortran f=C.
#
-# Note For files without extension you can use no_extension as a placeholder.
+# Note: For files without extension you can use no_extension as a placeholder.
#
# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
# the files are not read by doxygen.
MARKDOWN_SUPPORT = YES
+# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
+# to that level are automatically included in the table of contents, even if
+# they do not have an id attribute.
+# Note: This feature currently applies only to Markdown headings.
+# Minimum value: 0, maximum value: 99, default value: 0.
+# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
+
+TOC_INCLUDE_HEADINGS = 0
+
# When enabled doxygen tries to link words that correspond to documented
# classes, or namespaces to their corresponding documentation. Such a link can
-# be prevented in individual cases by by putting a % sign in front of the word
-# or globally by setting AUTOLINK_SUPPORT to NO.
+# be prevented in individual cases by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
# The default value is: YES.
AUTOLINK_SUPPORT = YES
IDL_PROPERTY_SUPPORT = YES
# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
+# tag is set to YES then doxygen will reuse the documentation of the first
# member in the group (if any) for the other members of the group. By default
# all members of a group must be documented explicitly.
# The default value is: NO.
DISTRIBUTE_GROUP_DOC = NO
+# If one adds a struct or class to a group and this option is enabled, then also
+# any nested class or struct is added to the same group. By default this option
+# is disabled and one has to add nested compounds explicitly via \ingroup.
+# The default value is: NO.
+
+GROUP_NESTED_COMPOUNDS = NO
+
# Set the SUBGROUPING tag to YES to allow class member groups of the same type
# (for instance a group of public functions) to be put as a subgroup of that
# type (e.g. under the Public Functions section). Set it to NO to prevent
# Build related configuration options
#---------------------------------------------------------------------------
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
# documentation are documented, even if no documentation was available. Private
# class members and static file members will be hidden unless the
# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
EXTRACT_ALL = YES
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
+# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
# be included in the documentation.
# The default value is: NO.
EXTRACT_PRIVATE = NO
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
# scope will be included in the documentation.
# The default value is: NO.
EXTRACT_PACKAGE = NO
-# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
+# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
# included in the documentation.
# The default value is: NO.
EXTRACT_STATIC = NO
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
-# locally in source files will be included in the documentation. If set to NO
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO,
# only classes defined in header files are included. Does not have any effect
# for Java sources.
# The default value is: YES.
EXTRACT_LOCAL_CLASSES = NO
-# This flag is only useful for Objective-C code. When set to YES local methods,
+# This flag is only useful for Objective-C code. If set to YES, local methods,
# which are defined in the implementation section but not in the interface are
-# included in the documentation. If set to NO only methods in the interface are
+# included in the documentation. If set to NO, only methods in the interface are
# included.
# The default value is: NO.
# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
# undocumented classes that are normally visible in the class hierarchy. If set
-# to NO these classes will be included in the various overviews. This option has
-# no effect if EXTRACT_ALL is enabled.
+# to NO, these classes will be included in the various overviews. This option
+# has no effect if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_CLASSES = NO
# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
-# (class|struct|union) declarations. If set to NO these declarations will be
+# (class|struct|union) declarations. If set to NO, these declarations will be
# included in the documentation.
# The default value is: NO.
HIDE_FRIEND_COMPOUNDS = NO
# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
-# documentation blocks found inside the body of a function. If set to NO these
+# documentation blocks found inside the body of a function. If set to NO, these
# blocks will be appended to the function's detailed documentation block.
# The default value is: NO.
INTERNAL_DOCS = NO
# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
-# names in lower-case letters. If set to YES upper-case letters are also
+# names in lower-case letters. If set to YES, upper-case letters are also
# allowed. This is useful if you have classes or files whose names only differ
# in case and if your file system supports case sensitive file names. Windows
# and Mac users are advised to set this option to NO.
CASE_SENSE_NAMES = NO
# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
-# their full class and namespace scopes in the documentation. If set to YES the
+# their full class and namespace scopes in the documentation. If set to YES, the
# scope will be hidden.
# The default value is: NO.
HIDE_SCOPE_NAMES = NO
+# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
+# append additional text to a page's title, such as Class Reference. If set to
+# YES the compound reference will be hidden.
+# The default value is: NO.
+
+HIDE_COMPOUND_REFERENCE= NO
+
# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
# the files that are included by a file in the documentation of that file.
# The default value is: YES.
# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
# (detailed) documentation of file and class members alphabetically by member
-# name. If set to NO the members will appear in declaration order.
+# name. If set to NO, the members will appear in declaration order.
# The default value is: YES.
SORT_MEMBER_DOCS = YES
# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
# descriptions of file, namespace and class members alphabetically by member
-# name. If set to NO the members will appear in declaration order. Note that
+# name. If set to NO, the members will appear in declaration order. Note that
# this will also influence the order of the classes in the class list.
# The default value is: NO.
STRICT_PROTO_MATCHING = NO
-# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
-# todo list. This list is created by putting \todo commands in the
-# documentation.
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
+# list. This list is created by putting \todo commands in the documentation.
# The default value is: YES.
GENERATE_TODOLIST = YES
-# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
-# test list. This list is created by putting \test commands in the
-# documentation.
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+# list. This list is created by putting \test commands in the documentation.
# The default value is: YES.
GENERATE_TESTLIST = YES
-# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
# list. This list is created by putting \bug commands in the documentation.
# The default value is: YES.
GENERATE_BUGLIST = YES
-# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
# the deprecated list. This list is created by putting \deprecated commands in
# the documentation.
# The default value is: YES.
MAX_INITIALIZER_LINES = 30
# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
-# the bottom of the documentation of classes and structs. If set to YES the list
-# will mention the files that were used to generate the documentation.
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
# The default value is: YES.
SHOW_USED_FILES = YES
# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
# For LaTeX the style of the bibliography can be controlled using
# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
-# search path. Do not use file names with spaces, bibtex cannot handle them. See
-# also \cite for info how to create references.
+# search path. See also \cite for info how to create references.
CITE_BIB_FILES =
QUIET = NO
# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
# this implies that the warnings are on.
#
# Tip: Turn warnings on while writing the documentation.
WARNINGS = YES
-# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
+# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
# will automatically be disabled.
# The default value is: YES.
# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
# are documented, but have no documentation for their parameters or return
-# value. If set to NO doxygen will only warn about wrong or incomplete parameter
-# documentation, but not about the absence of documentation.
+# value. If set to NO, doxygen will only warn about wrong or incomplete
+# parameter documentation, but not about the absence of documentation.
# The default value is: NO.
WARN_NO_PARAMDOC = YES
+# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
+# a warning is encountered.
+# The default value is: NO.
+
+WARN_AS_ERROR = NO
+
# The WARN_FORMAT tag determines the format of the warning messages that doxygen
# can produce. The string should contain the $file, $line, and $text tags, which
# will be replaced by the file and line number from which the warning originated
# The INPUT tag is used to specify the files and/or directories that contain
# documented source files. You may enter file names like myfile.cpp or
# directories like /usr/src/myproject. Separate the files or directories with
-# spaces.
+# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
# Note: If this tag is empty the current directory is searched.
INPUT = .
# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
-# *.h) to filter out the source-files in the directories. If left blank the
-# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
-# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
-# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
-# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
-# *.qsf, *.as and *.js.
+# *.h) to filter out the source-files in the directories.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# read by doxygen.
+#
+# If left blank the following patterns are tested: *.c, *.cc, *.cxx, *.cpp,
+# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
+# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
+# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08,
+# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf.
FILE_PATTERNS =
# Note that relative paths are relative to the directory from which doxygen is
# run.
-EXCLUDE = config.h libsigrokdecode-internal.h exception.c \
+EXCLUDE = build config.h libsigrokdecode-internal.h exception.c \
module_sigrokdecode.c type_decoder.c type_logic.c \
util.c
# Note that the filter must not add or remove lines; it is applied before the
# code is scanned, but not when the output code is generated. If lines are added
# or removed, the anchors will not be placed correctly.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
INPUT_FILTER =
# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
# patterns match the file name, INPUT_FILTER is applied.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
FILTER_PATTERNS =
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER ) will also be used to filter the input files that are used for
+# INPUT_FILTER) will also be used to filter the input files that are used for
# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
# The default value is: NO.
REFERENCES_RELATION = YES
# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
-# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
+# to YES then the hyperlinks from functions in REFERENCES_RELATION and
# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
# link to the documentation.
# The default value is: YES.
VERBATIM_HEADERS = YES
+# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
+# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
+# cost of reduced performance. This can be particularly helpful with template
+# rich C++ code for which doxygen's built-in parser lacks the necessary type
+# information.
+# Note: The availability of this option depends on whether or not doxygen was
+# generated with the -Duse-libclang=ON option for CMake.
+# The default value is: NO.
+
+CLANG_ASSISTED_PARSING = NO
+
+# If clang assisted parsing is enabled you can provide the compiler with command
+# line options that you would normally use when invoking the compiler. Note that
+# the include paths will already be set by doxygen for the files and directories
+# specified with INPUT and INCLUDE_PATH.
+# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
+
+CLANG_OPTIONS =
+
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
# Configuration options related to the HTML output
#---------------------------------------------------------------------------
-# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
# The default value is: YES.
GENERATE_HTML = YES
HTML_STYLESHEET =
-# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
-# defined cascading style sheet that is included after the standard style sheets
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
# created by doxygen. Using this option one can overrule certain style aspects.
# This is preferred over using HTML_STYLESHEET since it does not replace the
-# standard style sheet and is therefor more robust against future updates.
-# Doxygen will copy the style sheet file to the output directory. For an example
-# see the documentation.
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_STYLESHEET =
HTML_EXTRA_FILES =
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
-# will adjust the colors in the stylesheet and background images according to
+# will adjust the colors in the style sheet and background images according to
# this color. Hue is specified as an angle on a colorwheel, see
# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
# page will contain the date and time when the page was generated. Setting this
-# to NO can help when comparing the output of multiple runs.
-# The default value is: YES.
+# to YES can help to show when doxygen was last run and thus if the
+# documentation is up to date.
+# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_TIMESTAMP = YES
CHM_FILE =
# The HHC_LOCATION tag can be used to specify the location (absolute path
-# including file name) of the HTML help compiler ( hhc.exe). If non-empty
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
# doxygen will try to run the HTML help compiler on the generated index.hhp.
# The file has to be specified with full path.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
HHC_LOCATION =
-# The GENERATE_CHI flag controls if a separate .chi index file is generated (
-# YES) or that it should be included in the master .chm file ( NO).
+# The GENERATE_CHI flag controls if a separate .chi index file is generated
+# (YES) or that it should be included in the master .chm file (NO).
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
GENERATE_CHI = NO
-# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
# and project file content.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_INDEX_ENCODING =
-# The BINARY_TOC flag controls whether a binary table of contents is generated (
-# YES) or a normal table of contents ( NO) in the .chm file.
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
# index structure (just like the one that is generated for HTML Help). For this
# to work a browser that supports JavaScript, DHTML, CSS and frames is required
# (i.e. any modern browser). Windows users are probably better off using the
-# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
# further fine-tune the look of the index. As an example, the default style
# sheet generated by doxygen has an example that shows how to put an image at
# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
TREEVIEW_WIDTH = 250
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
# external symbols imported via tag files in a separate window.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# http://www.mathjax.org) which uses client side Javascript for the rendering
-# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
# installed or if you want to formulas look prettier in the HTML output. When
# enabled you may also need to install MathJax separately and configure the path
# to it using the MATHJAX_RELPATH option.
# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a web server instead of a web client using Javascript. There
-# are two flavours of web server based searching depending on the
-# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
-# searching and an index file used by the script. When EXTERNAL_SEARCH is
-# enabled the indexing and searching needs to be provided by external tools. See
-# the section "External Indexing and Searching" for details.
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
# The default value is: NO.
# This tag requires that the tag SEARCHENGINE is set to YES.
# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
# search results.
#
-# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: http://xapian.org/).
#
# The SEARCHENGINE_URL should point to a search engine hosted by a web server
# which will return the search results when EXTERNAL_SEARCH is enabled.
#
-# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: http://xapian.org/). See the section "External Indexing and
# Searching" for details.
# Configuration options related to the LaTeX output
#---------------------------------------------------------------------------
-# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
+# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
# The default value is: YES.
GENERATE_LATEX = NO
MAKEINDEX_CMD_NAME = makeindex
-# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
+# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
PAPER_TYPE = a4
# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
-# that should be included in the LaTeX output. To get the times font for
-# instance you can specify
-# EXTRA_PACKAGES=times
+# that should be included in the LaTeX output. The package can be specified just
+# by its name or with the correct syntax as to be used with the LaTeX
+# \usepackage command. To get the times font for instance you can specify:
+# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
+# To use the option intlimits with the amsmath package you can specify:
+# EXTRA_PACKAGES=[intlimits]{amsmath}
# If left blank no extra packages will be included.
# This tag requires that the tag GENERATE_LATEX is set to YES.
#
# Note: Only use a user-defined header if you know what you are doing! The
# following commands have a special meaning inside the header: $title,
-# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will
-# replace them by respectively the title of the page, the current date and time,
-# only the current date, the version number of doxygen, the project name (see
-# PROJECT_NAME), or the project number (see PROJECT_NUMBER).
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
+# string, for the replacement values of the other commands the user is referred
+# to HTML_HEADER.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HEADER =
# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
# generated LaTeX document. The footer should contain everything after the last
-# chapter. If it is left blank doxygen will generate a standard footer.
+# chapter. If it is left blank doxygen will generate a standard footer. See
+# LATEX_HEADER for more information on how to generate a default footer and what
+# special commands can be used inside the footer.
#
# Note: Only use a user-defined footer if you know what you are doing!
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_FOOTER =
+# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# LaTeX style sheets that are included after the standard style sheets created
+# by doxygen. Using this option one can overrule certain style aspects. Doxygen
+# will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_STYLESHEET =
+
# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the LATEX_OUTPUT output
# directory. Note that the files will be copied as-is; there are no commands or
PDF_HYPERLINKS = YES
-# If the LATEX_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
-# the PDF file directly from the LaTeX files. Set this option to YES to get a
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES, to get a
# higher quality PDF documentation.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BIB_STYLE = plain
+# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_TIMESTAMP = NO
+
#---------------------------------------------------------------------------
# Configuration options related to the RTF output
#---------------------------------------------------------------------------
-# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
+# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
# RTF output is optimized for Word 97 and may not look too pretty with other RTF
# readers/editors.
# The default value is: NO.
RTF_OUTPUT = rtf
-# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
+# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
RTF_EXTENSIONS_FILE =
+# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
+# with syntax highlighting in the RTF output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_SOURCE_CODE = NO
+
#---------------------------------------------------------------------------
# Configuration options related to the man page output
#---------------------------------------------------------------------------
-# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
+# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
# classes and files.
# The default value is: NO.
MAN_EXTENSION = .3
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
+# MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR =
+
# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
# will generate one additional man file for each entity documented in the real
# man page(s). These additional files only source the real man page, but without
# Configuration options related to the XML output
#---------------------------------------------------------------------------
-# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
+# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
# captures the structure of the code including all documentation.
# The default value is: NO.
XML_OUTPUT = xml
-# The XML_SCHEMA tag can be used to specify a XML schema, which can be used by a
-# validating XML parser to check the syntax of the XML files.
-# This tag requires that the tag GENERATE_XML is set to YES.
-
-XML_SCHEMA =
-
-# The XML_DTD tag can be used to specify a XML DTD, which can be used by a
-# validating XML parser to check the syntax of the XML files.
-# This tag requires that the tag GENERATE_XML is set to YES.
-
-XML_DTD =
-
-# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
+# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
# listings (including syntax highlighting and cross-referencing information) to
# the XML output. Note that enabling this will significantly increase the size
# of the XML output.
# Configuration options related to the DOCBOOK output
#---------------------------------------------------------------------------
-# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
+# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
# that can be used to generate PDF.
# The default value is: NO.
DOCBOOK_OUTPUT = docbook
+# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
+# program listings (including syntax highlighting and cross-referencing
+# information) to the DOCBOOK output. Note that enabling this will significantly
+# increase the size of the DOCBOOK output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_PROGRAMLISTING = NO
+
#---------------------------------------------------------------------------
# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
-# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
-# Definitions (see http://autogen.sf.net) file that captures the structure of
-# the code including all documentation. Note that this feature is still
-# experimental and incomplete at the moment.
+# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
+# AutoGen Definitions (see http://autogen.sf.net) file that captures the
+# structure of the code including all documentation. Note that this feature is
+# still experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_AUTOGEN_DEF = NO
# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
-# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
+# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
# file that captures the structure of the code including all documentation.
#
# Note that this feature is still experimental and incomplete at the moment.
GENERATE_PERLMOD = NO
-# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
+# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
# output from the Perl module output.
# The default value is: NO.
PERLMOD_LATEX = NO
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
+# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
# formatted so it can be parsed by a human reader. This is useful if you want to
-# understand what is going on. On the other hand, if this tag is set to NO the
+# understand what is going on. On the other hand, if this tag is set to NO, the
# size of the Perl module output will be much smaller and Perl will parse it
# just the same.
# The default value is: YES.
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
-# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
+# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
# C-preprocessor directives found in the sources and include files.
# The default value is: YES.
ENABLE_PREPROCESSING = YES
-# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
-# in the source code. If set to NO only conditional compilation will be
+# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
+# in the source code. If set to NO, only conditional compilation will be
# performed. Macro expansion can be done in a controlled way by setting
# EXPAND_ONLY_PREDEF to YES.
# The default value is: NO.
EXPAND_ONLY_PREDEF = YES
-# If the SEARCH_INCLUDES tag is set to YES the includes files in the
+# If the SEARCH_INCLUDES tag is set to YES, the include files in the
# INCLUDE_PATH will be searched if a #include is found.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
# recursively expanded use the := operator instead of the = operator.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-# This gets rid of the SRD_API function prefix in the Doxygen output.
PREDEFINED = SRD_API=
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
EXPAND_AS_DEFINED =
# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
-# remove all refrences to function-like macros that are alone on a line, have an
-# all uppercase name, and do not end with a semicolon. Such function macros are
-# typically used for boiler-plate code, and will confuse the parser if not
+# remove all references to function-like macros that are alone on a line, have
+# an all uppercase name, and do not end with a semicolon. Such function macros
+# are typically used for boiler-plate code, and will confuse the parser if not
# removed.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
# where loc1 and loc2 can be relative or absolute paths or URLs. See the
# section "Linking to external documentation" for more information about the use
# of tag files.
-# Note: Each tag file must have an unique name (where the name does NOT include
+# Note: Each tag file must have a unique name (where the name does NOT include
# the path). If a tag file is not located in the directory in which doxygen is
# run, you must also specify the path to the tagfile here.
GENERATE_TAGFILE =
-# If the ALLEXTERNALS tag is set to YES all external class will be listed in the
-# class index. If set to NO only the inherited external classes will be listed.
+# If the ALLEXTERNALS tag is set to YES, all external classes will be listed in
+# the class index. If set to NO, only the inherited external classes will be
+# listed.
# The default value is: NO.
ALLEXTERNALS = NO
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
-# the modules index. If set to NO, only the current project's groups will be
+# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will be
# listed.
# The default value is: YES.
EXTERNAL_GROUPS = YES
-# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
+# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
# the related pages index. If set to NO, only the current project's pages will
# be listed.
# The default value is: YES.
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
-# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
+# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
# NO turns the diagrams off. Note that this option also works with HAVE_DOT
# disabled, but it is recommended to install and use dot, since it yields more
DIA_PATH =
-# If set to YES, the inheritance and collaboration graphs will hide inheritance
+# If set to YES the inheritance and collaboration graphs will hide inheritance
# and usage relations if the target is undocumented or is not a class.
# The default value is: YES.
# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
# Bell Labs. The other options in this section have no effect if this option is
# set to NO
-# The default value is: NO.
+# The default value is: YES.
HAVE_DOT = YES
DOT_NUM_THREADS = 0
-# When you want a differently looking font n the dot files that doxygen
+# When you want a differently looking font in the dot files that doxygen
# generates you can specify the font name using DOT_FONTNAME. You need to make
# sure dot is able to find the font, which can be done by putting it in a
# standard location or by setting the DOTFONTPATH environment variable or by
GROUP_GRAPHS = YES
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
# collaboration diagrams in a style similar to the OMG's Unified Modeling
# Language.
# The default value is: NO.
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable call graphs for selected
-# functions only using the \callgraph command.
+# functions only using the \callgraph command. Disabling a call graph can be
+# accomplished by means of the command \hidecallgraph.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable caller graphs for selected
-# functions only using the \callergraph command.
+# functions only using the \callergraph command. Disabling a caller graph can be
+# accomplished by means of the command \hidecallergraph.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
DIRECTORY_GRAPH = YES
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot.
+# generated by dot. For an explanation of the image formats see the section
+# output formats in the documentation of the dot tool (Graphviz (see:
+# http://www.graphviz.org/)).
# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
# to make the SVG files visible in IE 9+ (other browsers do not have this
# requirement).
-# Possible values are: png, jpg, gif and svg.
+# Possible values are: png, png:cairo, png:cairo:cairo, png:cairo:gd, png:gd,
+# png:gd:gd, jpg, jpg:cairo, jpg:cairo:gd, jpg:gd, jpg:gd:gd, gif, gif:cairo,
+# gif:cairo:gd, gif:gd, gif:gd:gd, svg, png:gd, png:gd:gd, png:cairo,
+# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
+# png:gdiplus:gdiplus.
# The default value is: png.
# This tag requires that the tag HAVE_DOT is set to YES.
DIAFILE_DIRS =
+# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
+# path where java can find the plantuml.jar file. If left blank, it is assumed
+# PlantUML is not used or called during a preprocessing step. Doxygen will
+# generate a warning when it encounters a \startuml command in this case and
+# will not generate output for the diagram.
+
+PLANTUML_JAR_PATH =
+
+# When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a
+# configuration file for plantuml.
+
+PLANTUML_CFG_FILE =
+
+# When using plantuml, the specified paths are searched for files specified by
+# the !include statement in a plantuml block.
+
+PLANTUML_INCLUDE_PATH =
+
# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
# that will be shown in the graph. If the number of nodes in a graph becomes
# larger than this value, doxygen will truncate the graph, which is visualized
DOT_TRANSPARENT = NO
-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
# files in one run (i.e. multiple -o and -T options on the command line). This
# makes dot run faster, but since only newer versions of dot (>1.8.10) support
# this, this feature is disabled by default.
GENERATE_LEGEND = YES
-# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
+# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
# files that are used to generate the various graphs.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
# Python 3 is always needed.
SR_PKG_CHECK([python3], [SRD_PKGLIBS],
- [python3 >= 3.2], [python-3.6 >= 3.6], [python-3.5 >= 3.5], [python-3.4 >= 3.4], [python-3.3 >= 3.3], [python-3.2 >= 3.2])
+ [python3 >= 3.2], [python-3.7 >= 3.7], [python-3.6 >= 3.6], [python-3.5 >= 3.5], [python-3.4 >= 3.4], [python-3.3 >= 3.3], [python-3.2 >= 3.2])
AS_IF([test "x$sr_have_python3" = xno],
[AC_MSG_ERROR([Cannot find Python 3 development headers.])])
# We also need to find the name of the python3 executable (for 'make install').
# Some OSes call this python3, some call it python3.2, etc. etc.
AC_ARG_VAR([PYTHON3], [Python 3 interpreter])
-AC_CHECK_PROGS([PYTHON3], [python3.6 python3.5 python3.4 python3.3 python3.2 python3])
+AC_CHECK_PROGS([PYTHON3], [python3.7 python3.6 python3.5 python3.4 python3.3 python3.2 python3])
AS_IF([test "x$PYTHON3" = x],
[AC_MSG_ERROR([Cannot find Python 3 interpreter.])])
SR_CHECK_COMPILE_FLAGS([SRD_EXTRA_CFLAGS], [visibility], [-fvisibility=hidden])
# Select suitable compiler warning flags.
-SR_ARG_ENABLE_WARNINGS([SRD_WFLAGS], [-Wall], [-Wall -Wextra -Wmissing-prototypes])
+SR_ARG_ENABLE_WARNINGS([SRD_WFLAGS], [-Wall], [-Wall -Wextra -Wmissing-prototypes -Wshadow -Wformat=2 -Wno-format-nonliteral -Wfloat-equal])
# Link against libm, this is required (among other things) by Python.
SRD_EXTRA_LIBS=
"a list of dict elements.", d->name, attr);
goto err_out;
}
- pdch = g_malloc0(sizeof(struct srd_channel));
+ pdch = g_malloc(sizeof(struct srd_channel));
/* Add to list right away so it doesn't get lost. */
pdchl = g_slist_prepend(pdchl, pdch);
except_out:
srd_exception_catch("Failed to get %s list of %s decoder",
attr, d->name);
+
err_out:
g_slist_free_full(pdchl, &channel_free);
Py_XDECREF(py_channellist);
py_values = PyDict_GetItemString(py_opt, "values");
if (py_values) {
- /* A default is required if a list of values is
- * given, since it's used to verify their type. */
+ /*
+ * A default is required if a list of values is
+ * given, since it's used to verify their type.
+ */
if (!o->def) {
srd_err("No default for option '%s'.", o->id);
goto err_out;
if (!py_item)
goto except_out;
- if (Py_TYPE(py_default) != Py_TYPE(py_item)) {
+ if (py_default && (Py_TYPE(py_default) != Py_TYPE(py_item))) {
srd_err("All values for option '%s' must be "
"of the same type as the default.",
o->id);
except_out:
srd_exception_catch("Failed to get %s decoder options", d->name);
+
err_out:
g_slist_free_full(options, &decoder_option_free);
Py_XDECREF(py_opts);
return SRD_ERR_PYTHON;
}
-/* Convert annotation class attribute to GSList of char **.
- */
+/* Convert annotation class attribute to GSList of char **. */
static int get_annotations(struct srd_decoder *dec)
{
PyObject *py_annlist, *py_ann;
except_out:
srd_exception_catch("Failed to get %s decoder annotations", dec->name);
+
err_out:
g_slist_free_full(annotations, (GDestroyNotify)&g_strfreev);
Py_XDECREF(py_annlist);
return SRD_ERR_PYTHON;
}
-/* Convert annotation_rows to GSList of 'struct srd_decoder_annotation_row'.
- */
+/* Convert annotation_rows to GSList of 'struct srd_decoder_annotation_row'. */
static int get_annotation_rows(struct srd_decoder *dec)
{
PyObject *py_ann_rows, *py_ann_row, *py_ann_classes, *py_item;
except_out:
srd_exception_catch("Failed to get %s decoder annotation rows",
dec->name);
+
err_out:
g_slist_free_full(annotation_rows, &annotation_row_free);
Py_XDECREF(py_ann_rows);
return SRD_ERR_PYTHON;
}
-/* Convert binary classes to GSList of char **.
- */
+/* Convert binary classes to GSList of char **. */
static int get_binary_classes(struct srd_decoder *dec)
{
PyObject *py_bin_classes, *py_bin_class;
except_out:
srd_exception_catch("Failed to get %s decoder binary classes",
dec->name);
+
err_out:
g_slist_free_full(bin_classes, (GDestroyNotify)&g_strfreev);
Py_XDECREF(py_bin_classes);
return SRD_ERR_PYTHON;
}
-/* Check whether the Decoder class defines the named method.
- */
+/* Check whether the Decoder class defines the named method. */
static int check_method(PyObject *py_dec, const char *mod_name,
const char *method_name)
{
goto err_out;
}
- /* Check Decoder class for required methods.
- */
+ /* Check Decoder class for required methods. */
+
if (check_method(d->py_dec, module_name, "start") != SRD_OK) {
fail_txt = "no 'start()' method";
goto err_out;
module_name, fail_txt);
}
fail_txt = NULL;
+
err_out:
if (fail_txt)
srd_err("Failed to load decoder %s: %s", module_name, fail_txt);
return SRD_OK;
}
-static void srd_decoder_load_all_zip_path(char *path)
+static void srd_decoder_load_all_zip_path(char *zip_path)
{
PyObject *zipimport_mod, *zipimporter_class, *zipimporter;
PyObject *prefix_obj, *files, *key, *value, *set, *modname;
if (zipimporter_class == NULL)
goto err_out;
- zipimporter = PyObject_CallFunction(zipimporter_class, "s", path);
+ zipimporter = PyObject_CallFunction(zipimporter_class, "s", zip_path);
if (zipimporter == NULL)
goto err_out;
const gchar *direntry;
if (!(dir = g_dir_open(path, 0, NULL))) {
- /* Not really fatal */
- /* Try zipimport method too */
+ /* Not really fatal. Try zipimport method too. */
srd_decoder_load_all_zip_path(path);
return;
}
- /* This ignores errors returned by srd_decoder_load(). That
+ /*
+ * This ignores errors returned by srd_decoder_load(). That
* function will have logged the cause, but in any case we
- * want to continue anyway. */
+ * want to continue anyway.
+ */
while ((direntry = g_dir_read_name(dir)) != NULL) {
/* The directory name is the module name (e.g. "i2c"). */
srd_decoder_load(direntry);
}
g_dir_close(dir);
-
}
/**
*/
SRD_API int srd_decoder_unload_all(void)
{
- g_slist_foreach(pd_list, (GFunc)srd_decoder_unload, NULL);
+ for (GSList *l = pd_list; l; l = l->next)
+ srd_decoder_unload(l->data);
g_slist_free(pd_list);
pd_list = NULL;
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Michalis Pappas <mpappas@fastmail.fm>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+'''
+This decoder stacks on top of the 'i2c' PD and decodes the
+Microchip ATSHA204A CryptoAuthentication protocol.
+'''
+
+from .pd import Decoder
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Michalis Pappas <mpappas@fastmail.fm>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+import sigrokdecode as srd
+
+WORD_ADDR_RESET = 0x00
+WORD_ADDR_SLEEP = 0x01
+WORD_ADDR_IDLE = 0x02
+WORD_ADDR_COMMAND = 0x03
+
+WORD_ADDR = {0x00: 'RESET', 0x01: 'SLEEP', 0x02: 'IDLE', 0x03: 'COMMAND'}
+
+OPCODE_DERIVE_KEY = 0x1c
+OPCODE_DEV_REV = 0x30
+OPCODE_GEN_DIG = 0x15
+OPCODE_HMAC = 0x11
+OPCODE_CHECK_MAC = 0x28
+OPCODE_LOCK = 0x17
+OPCODE_MAC = 0x08
+OPCODE_NONCE = 0x16
+OPCODE_PAUSE = 0x01
+OPCODE_RANDOM = 0x1b
+OPCODE_READ = 0x02
+OPCODE_SHA = 0x47
+OPCODE_UPDATE_EXTRA = 0x20
+OPCODE_WRITE = 0x12
+
+OPCODES = {
+ 0x01: 'Pause',
+ 0x02: 'Read',
+ 0x08: 'MAC',
+ 0x11: 'HMAC',
+ 0x12: 'Write',
+ 0x15: 'GenDig',
+ 0x16: 'Nonce',
+ 0x17: 'Lock',
+ 0x1b: 'Random',
+ 0x1c: 'DeriveKey',
+ 0x20: 'UpdateExtra',
+ 0x28: 'CheckMac',
+ 0x30: 'DevRev',
+ 0x47: 'SHA',
+}
+
+ZONE_CONFIG = 0x00
+ZONE_OTP = 0x01
+ZONE_DATA = 0x02
+
+ZONES = {0x00: 'CONFIG', 0x01: 'OTP', 0x02: 'DATA'}
+
+STATUS_SUCCESS = 0x00
+STATUS_CHECKMAC_FAIL = 0x01
+STATUS_PARSE_ERROR = 0x03
+STATUS_EXECUTION_ERROR = 0x0f
+STATUS_READY = 0x11
+STATUS_CRC_COMM_ERROR = 0xff
+
+STATUS = {
+ 0x00: 'Command success',
+ 0x01: 'Checkmac failure',
+ 0x03: 'Parse error',
+ 0x0f: 'Execution error',
+ 0x11: 'Ready',
+ 0xff: 'CRC / communications error',
+}
+
+class Decoder(srd.Decoder):
+ api_version = 3
+ id = 'atsha204a'
+ name = 'ATSHA204A'
+ longname = 'Microchip ATSHA204A'
+ desc = 'Microchip ATSHA204A CryptoAuthentication device.'
+ license = 'gplv2+'
+ inputs = ['i2c']
+ outputs = ['atsha204a']
+ annotations = (
+ ('waddr', 'Word address'),
+ ('count', 'Count'),
+ ('opcode', 'Opcode'),
+ ('param1', 'Param1'),
+ ('param2', 'Param2'),
+ ('data', 'Data'),
+ ('crc', 'CRC'),
+ ('status', 'Status'),
+ ('warning', 'Warning'),
+ )
+ annotation_rows = (
+ ('frame', 'Frame', (0, 1, 2, 3, 4, 5, 6)),
+ ('status', 'Status', (7,)),
+ ('warnings', 'Warnings', (8,)),
+ )
+
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.state = 'IDLE'
+ self.waddr = self.opcode = -1
+ self.ss_block = self.es_block = 0
+ self.bytes = []
+
+ def start(self):
+ self.out_ann = self.register(srd.OUTPUT_ANN)
+
+ def output_tx_bytes(self):
+ b = self.bytes
+ if len(b) < 1: # Ignore wakeup.
+ return
+ self.waddr = b[0][2]
+ self.put_waddr(b[0])
+ if self.waddr == WORD_ADDR_COMMAND:
+ count = b[1][2]
+ self.put_count(b[1])
+ if len(b) - 1 != count:
+ self.put_warning(b[0][0], b[-1][1],
+ 'Invalid frame length: Got {}, expecting {} '.format(
+ len(b) - 1, count))
+ return
+ self.opcode = b[2][2]
+ self.put_opcode(b[2])
+ self.put_param1(b[3])
+ self.put_param2([b[4], b[5]])
+ self.put_data(b[6:-2])
+ self.put_crc([b[-2], b[-1]])
+
+ def output_rx_bytes(self):
+ b = self.bytes
+ count = b[0][2]
+ self.put_count(b[0])
+ if self.waddr == WORD_ADDR_RESET:
+ self.put_data([b[1]])
+ self.put_crc([b[2], b[3]])
+ self.put_status(b[0][0], b[-1][1], b[1][2])
+ elif self.waddr == WORD_ADDR_COMMAND:
+ if count == 4: # Status / Error.
+ self.put_data([b[1]])
+ self.put_crc([b[2], b[3]])
+ self.put_status(b[0][0], b[-1][1], b[1][2])
+ else:
+ self.put_data(b[1:-2])
+ self.put_crc([b[-2], b[-1]])
+
+ def putx(self, s, data):
+ self.put(s[0], s[1], self.out_ann, data)
+
+ def puty(self, s, data):
+ self.put(s[0][0], s[1][1], self.out_ann, data)
+
+ def putz(self, ss, es, data):
+ self.put(ss, es, self.out_ann, data)
+
+ def put_waddr(self, s):
+ self.putx(s, [0, ['Word addr: %s' % WORD_ADDR[s[2]]]])
+
+ def put_count(self, s):
+ self.putx(s, [1, ['Count: %s' % s[2]]])
+
+ def put_opcode(self, s):
+ self.putx(s, [2, ['Opcode: %s' % OPCODES[s[2]]]])
+
+ def put_param1(self, s):
+ op = self.opcode
+ if op in (OPCODE_CHECK_MAC, OPCODE_DEV_REV, OPCODE_HMAC, \
+ OPCODE_MAC, OPCODE_NONCE, OPCODE_RANDOM, OPCODE_SHA):
+ self.putx(s, [3, ['Mode: %02X' % s[2]]])
+ elif op == OPCODE_DERIVE_KEY:
+ self.putx(s, [3, ['Random: %s' % s[2]]])
+ elif op == OPCODE_GEN_DIG:
+ self.putx(s, [3, ['Zone: %s' % ZONES[s[2]]]])
+ elif op == OPCODE_LOCK:
+ self.putx(s, [3, ['Zone: {}, Summary: {}'.format(
+ 'DATA/OTP' if s[2] else 'CONFIG',
+ 'Ignored' if s[2] & 0x80 else 'Used')]])
+ elif op == OPCODE_PAUSE:
+ self.putx(s, [3, ['Selector: %02X' % s[2]]])
+ elif op == OPCODE_READ:
+ self.putx(s, [3, ['Zone: {}, Length: {}'.format(ZONES[s[2] & 0x03],
+ '32 bytes' if s[2] & 0x90 else '4 bytes')]])
+ elif op == OPCODE_WRITE:
+ self.putx(s, [3, ['Zone: {}, Encrypted: {}, Length: {}'.format(ZONES[s[2] & 0x03],
+ 'Yes' if s[2] & 0x40 else 'No', '32 bytes' if s[2] & 0x90 else '4 bytes')]])
+ else:
+ self.putx(s, [3, ['Param1: %02X' % s[2]]])
+
+ def put_param2(self, s):
+ op = self.opcode
+ if op == OPCODE_DERIVE_KEY:
+ self.puty(s, [4, ['TargetKey: {:02x} {:02x}'.format(s[1][2], s[0][2])]])
+ elif op in (OPCODE_NONCE, OPCODE_PAUSE, OPCODE_RANDOM):
+ self.puty(s, [4, ['Zero: {:02x} {:02x}'.format(s[1][2], s[0][2])]])
+ elif op in (OPCODE_HMAC, OPCODE_MAC, OPCODE_CHECK_MAC, OPCODE_GEN_DIG):
+ self.puty(s, [4, ['SlotID: {:02x} {:02x}'.format(s[1][2], s[0][2])]])
+ elif op == OPCODE_LOCK:
+ self.puty(s, [4, ['Summary: {:02x} {:02x}'.format(s[1][2], s[0][2])]])
+ elif op in (OPCODE_READ, OPCODE_WRITE):
+ self.puty(s, [4, ['Address: {:02x} {:02x}'.format(s[1][2], s[0][2])]])
+ elif op == OPCODE_UPDATE_EXTRA:
+ self.puty(s, [4, ['NewValue: {:02x}'.format(s[0][2])]])
+ else:
+ self.puty(s, [4, ['-']])
+
+ def put_data(self, s):
+ if len(s) == 0:
+ return
+ op = self.opcode
+ if op == OPCODE_CHECK_MAC:
+ self.putz(s[0][0], s[31][1], [5, ['ClientChal: %s' % ' '.join(format(i[2], '02x') for i in s[0:31])]])
+ self.putz(s[32][0], s[63][1], [5, ['ClientResp: %s' % ' '.join(format(i[2], '02x') for i in s[32:63])]])
+ self.putz(s[64][0], s[76][1], [5, ['OtherData: %s' % ' '.join(format(i[2], '02x') for i in s[64:76])]])
+ elif op == OPCODE_DERIVE_KEY:
+ self.putz(s[0][0], s[31][1], [5, ['MAC: %s' % ' '.join(format(i[2], '02x') for i in s)]])
+ elif op == OPCODE_GEN_DIG:
+ self.putz(s[0][0], s[3][1], [5, ['OtherData: %s' % ' '.join(format(i[2], '02x') for i in s)]])
+ elif op == OPCODE_MAC:
+ self.putz(s[0][0], s[31][1], [5, ['Challenge: %s' % ' '.join(format(i[2], '02x') for i in s)]])
+ elif op == OPCODE_WRITE:
+ if len(s) > 32: # Value + MAC.
+ self.putz(s[0][0], s[-31][1], [5, ['Value: %s' % ' '.join(format(i[2], '02x') for i in s)]])
+ self.putz(s[-32][0], s[-1][1], [5, ['MAC: %s' % ' '.join(format(i[2], '02x') for i in s)]])
+ else: # Just value.
+ self.putz(s[0][0], s[-1][1], [5, ['Value: %s' % ' '.join(format(i[2], '02x') for i in s)]])
+ else:
+ self.putz(s[0][0], s[-1][1], [5, ['Data: %s' % ' '.join(format(i[2], '02x') for i in s)]])
+
+ def put_crc(self, s):
+ self.puty(s, [6, ['CRC: {:02X} {:02X}'.format(s[0][2], s[1][2])]])
+
+ def put_status(self, ss, es, status):
+ self.putz(ss, es, [7, ['Status: %s' % STATUS[status]]])
+
+ def put_warning(self, ss, es, msg):
+ self.putz(ss, es, [8, ['Warning: %s' % msg]])
+
+ def decode(self, ss, es, data):
+ cmd, databyte = data
+
+ # State machine.
+ if self.state == 'IDLE':
+ # Wait for an I²C START condition.
+ if cmd != 'START':
+ return
+ self.state = 'GET SLAVE ADDR'
+ self.ss_block = ss
+ elif self.state == 'GET SLAVE ADDR':
+ # Wait for an address read/write operation.
+ if cmd == 'ADDRESS READ':
+ self.state = 'READ REGS'
+ elif cmd == 'ADDRESS WRITE':
+ self.state = 'WRITE REGS'
+ elif self.state == 'READ REGS':
+ if cmd == 'DATA READ':
+ self.bytes.append([ss, es, databyte])
+ elif cmd == 'STOP':
+ self.es_block = es
+                # Reset the opcode before handling the received data, since a
+                # stale opcode causes responses to be displayed incorrectly.
+ self.opcode = -1
+ self.output_rx_bytes()
+ self.waddr = -1
+ self.bytes = []
+ self.state = 'IDLE'
+ elif self.state == 'WRITE REGS':
+ if cmd == 'DATA WRITE':
+ self.bytes.append([ss, es, databyte])
+ elif cmd == 'STOP':
+ self.es_block = es
+ self.output_tx_bytes()
+ self.bytes = []
+ self.state = 'IDLE'
+
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Jorge Solla Rubiales <jorgesolla@gmail.com>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+'''
+Consumer Electronics Control (CEC) protocol allows users to command and
+control devices connected through HDMI.
+'''
+
+from .pd import Decoder
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Jorge Solla Rubiales <jorgesolla@gmail.com>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program. If not, see <http://www.gnu.org/licenses/>.
+##
+
+import sigrokdecode as srd
+from .protocoldata import *
+
+# Pulse types
+class Pulse:
+ INVALID, START, ZERO, ONE = range(4)
+
+# Protocol states
+class Stat:
+ WAIT_START, GET_BITS, WAIT_EOM, WAIT_ACK = range(4)
+
+# Accepted pulse timing windows in milliseconds. 'low' bounds the
+# initial low period (used to classify the pulse), 'total' bounds the
+# full falling-edge-to-falling-edge period.
+timing = {
+    Pulse.START: {
+        'low': {'min': 3.5, 'max': 3.9},
+        'total': {'min': 4.3, 'max': 4.7},
+    },
+    Pulse.ZERO: {
+        'low': {'min': 1.3, 'max': 1.7},
+        'total': {'min': 2.05, 'max': 2.75},
+    },
+    Pulse.ONE: {
+        'low': {'min': 0.4, 'max': 0.8},
+        'total': {'min': 2.05, 'max': 2.75},
+    },
+}
+
+class ChannelError(Exception):
+    pass
+
+# Raised by decode() when no samplerate has been provided via metadata().
+# Without this definition the 'raise SamplerateError(...)' below would
+# itself fail with a NameError.
+class SamplerateError(Exception):
+    pass
+
+class Decoder(srd.Decoder):
+    api_version = 3
+    id = 'cec'
+    name = 'CEC'
+    longname = 'HDMI-CEC'
+    desc = 'HDMI Consumer Electronics Control (CEC) protocol.'
+    license = 'gplv2+'
+    inputs = ['logic']
+    outputs = ['cec']
+    channels = (
+        {'id': 'cec', 'name': 'CEC', 'desc': 'CEC bus data'},
+    )
+    annotations = (
+        ('st', 'Start'),
+        ('eom-0', 'End of message'),
+        ('eom-1', 'Message continued'),
+        ('nack', 'ACK not set'),
+        ('ack', 'ACK set'),
+        ('bits', 'Bits'),
+        ('bytes', 'Bytes'),
+        ('frames', 'Frames'),
+        ('sections', 'Sections'),
+        ('warnings', 'Warnings')
+    )
+    annotation_rows = (
+        ('bits', 'Bits', (0, 1, 2, 3, 4, 5)),
+        ('bytes', 'Bytes', (6,)),
+        ('frames', 'Frames', (7,)),
+        ('sections', 'Sections', (8,)),
+        ('warnings', 'Warnings', (9,))
+    )
+
+    def __init__(self):
+        self.reset()
+
+    def precalculate(self):
+        # Restrict max length of ACK/NACK labels to 2 BIT pulses.
+        bit_time = timing[Pulse.ZERO]['total']['min'] * 2
+        self.max_ack_len_samples = round((bit_time / 1000) * self.samplerate)
+
+    def reset(self):
+        # Full decoder reset: state machine, samplerate and edge bookkeeping.
+        self.stat = Stat.WAIT_START
+        self.samplerate = None
+        self.fall_start = None
+        self.fall_end = None
+        self.rise = None
+        self.reset_frame_vars()
+
+    def reset_frame_vars(self):
+        # Per-frame state, cleared at every START pulse.
+        self.eom = None
+        self.bit_count = 0
+        self.byte_count = 0
+        self.byte = 0
+        self.byte_start = None
+        self.frame_start = None
+        self.frame_end = None
+        self.is_nack = 0
+        self.cmd_bytes = []
+
+    def metadata(self, key, value):
+        if key == srd.SRD_CONF_SAMPLERATE:
+            self.samplerate = value
+            self.precalculate()
+
+    def handle_frame(self, is_nack):
+        # Annotate a complete frame: raw hex bytes on row 7, and a decoded
+        # header/opcode/operands summary on row 8.
+        if self.fall_start is None or self.fall_end is None:
+            return
+
+        # Raw frame bytes, colon-separated (e.g. '40:04').
+        text = ':'.join('{:02x}'.format(b['val']) for b in self.cmd_bytes)
+        self.put(self.frame_start, self.frame_end, self.out_ann, [7, [text]])
+
+        i = 0
+        operands = 0
+        text = ''
+        while i < len(self.cmd_bytes):
+            if i == 0: # Parse header
+                (src, dst) = decode_header(self.cmd_bytes[i]['val'])
+                text = 'HDR: ' + src + ', ' + dst
+            elif i == 1: # Parse opcode
+                text += ' | OPC: ' + opcodes.get(self.cmd_bytes[i]['val'], 'Invalid')
+            else: # Parse operands
+                if operands == 0:
+                    text += ' | OPS: '
+                operands += 1
+                text += '0x{:02x}'.format(self.cmd_bytes[i]['val'])
+                if i != len(self.cmd_bytes) - 1:
+                    text += ', '
+            i += 1
+
+        # Header-only commands are PINGs.
+        if i == 1:
+            text += ' | OPC: PING' if self.eom else ' | OPC: NONE. Aborted cmd'
+
+        # Add extra information (ack of the command from the destination).
+        text += ' | R: NACK' if is_nack else ' | R: ACK'
+
+        self.put(self.frame_start, self.frame_end, self.out_ann, [8, [text]])
+
+    def process(self):
+        # Classify the pulse that just completed and feed it to the frame
+        # state machine. All times below are in milliseconds.
+        zero_time = ((self.rise - self.fall_start) / self.samplerate) * 1000.0
+        total_time = ((self.fall_end - self.fall_start) / self.samplerate) * 1000.0
+        pulse = Pulse.INVALID
+
+        # VALIDATION: Identify pulse based on length of the low period.
+        # (The 'low' windows in the timing table do not overlap.)
+        for key in timing:
+            if zero_time >= timing[key]['low']['min'] and zero_time <= timing[key]['low']['max']:
+                pulse = key
+                break
+
+        # VALIDATION: Invalid pulse.
+        if pulse == Pulse.INVALID:
+            self.stat = Stat.WAIT_START
+            self.put(self.fall_start, self.fall_end, self.out_ann, [9, ['Invalid pulse: Wrong timing']])
+            return
+
+        # VALIDATION: If waiting for start, discard everything else.
+        if self.stat == Stat.WAIT_START and pulse != Pulse.START:
+            self.put(self.fall_start, self.fall_end, self.out_ann, [9, ['Expected START: BIT found']])
+            return
+
+        # VALIDATION: If waiting for ACK or EOM, only BIT pulses (0/1) are expected.
+        if (self.stat == Stat.WAIT_ACK or self.stat == Stat.WAIT_EOM) and pulse == Pulse.START:
+            self.put(self.fall_start, self.fall_end, self.out_ann, [9, ['Expected BIT: START received']])
+            self.stat = Stat.WAIT_START
+
+        # VALIDATION: The ACK bit pulse remains high till the next frame (if
+        # any): Validate only the minimum time of the low period.
+        if self.stat == Stat.WAIT_ACK and pulse != Pulse.START:
+            if total_time < timing[pulse]['total']['min']:
+                pulse = Pulse.INVALID
+                self.put(self.fall_start, self.fall_end, self.out_ann, [9, ['ACK pulse below minimum time']])
+                self.stat = Stat.WAIT_START
+                return
+
+        # VALIDATION / PING FRAME DETECTION: The initiator doesn't set
+        # EOM = 1 but simply stops sending when no ACK arrives.
+        if self.stat == Stat.GET_BITS and pulse == Pulse.START:
+            # Make sure we received a complete byte to consider it a valid ping.
+            if self.bit_count == 0:
+                self.handle_frame(self.is_nack)
+            else:
+                self.put(self.frame_start, self.samplenum, self.out_ann, [9, ['ERROR: Incomplete byte received']])
+
+            # Set wait start so we receive the next frame.
+            self.stat = Stat.WAIT_START
+
+        # VALIDATION: Check timing of the BIT (0/1) pulse in any other case
+        # (not waiting for ACK).
+        if self.stat != Stat.WAIT_ACK and pulse != Pulse.START:
+            if total_time < timing[pulse]['total']['min'] or total_time > timing[pulse]['total']['max']:
+                self.put(self.fall_start, self.fall_end, self.out_ann, [9, ['Bit pulse exceeds total pulse timespan']])
+                pulse = Pulse.INVALID
+                self.stat = Stat.WAIT_START
+                return
+
+        if pulse == Pulse.ZERO:
+            bit = 0
+        elif pulse == Pulse.ONE:
+            bit = 1
+
+        # STATE: WAIT START
+        if self.stat == Stat.WAIT_START:
+            self.stat = Stat.GET_BITS
+            self.reset_frame_vars()
+            self.put(self.fall_start, self.fall_end, self.out_ann, [0, ['ST']])
+
+        # STATE: GET BITS
+        elif self.stat == Stat.GET_BITS:
+            # Reset the byte accumulator on the first bit.
+            if self.bit_count == 0:
+                self.byte_start = self.fall_start
+                self.byte = 0
+
+                # If it's the 1st byte of the datagram, save its sample number.
+                if len(self.cmd_bytes) == 0:
+                    self.frame_start = self.fall_start
+
+            # Bits arrive MSB-first.
+            self.byte += (bit << (7 - self.bit_count))
+            self.bit_count += 1
+            self.put(self.fall_start, self.fall_end, self.out_ann, [5, [str(bit)]])
+
+            if self.bit_count == 8:
+                self.bit_count = 0
+                self.byte_count += 1
+                self.stat = Stat.WAIT_EOM
+                self.put(self.byte_start, self.samplenum, self.out_ann, [6, ['0x{:02x}'.format(self.byte)]])
+                self.cmd_bytes.append({'st': self.byte_start, 'ed': self.samplenum, 'val': self.byte})
+
+        # STATE: WAIT EOM
+        elif self.stat == Stat.WAIT_EOM:
+            self.eom = bit
+            self.frame_end = self.fall_end
+
+            a = [2, ['EOM=Y']] if self.eom else [1, ['EOM=N']]
+            self.put(self.fall_start, self.fall_end, self.out_ann, a)
+
+            self.stat = Stat.WAIT_ACK
+
+        # STATE: WAIT ACK
+        elif self.stat == Stat.WAIT_ACK:
+            # If a frame with broadcast destination is being sent, the ACK is
+            # inverted: a 0 is considered a NACK, therefore we invert the value
+            # of the bit here, so we match the real meaning of it.
+            if (self.cmd_bytes[0]['val'] & 0x0F) == 0x0F:
+                bit = ~bit & 0x01
+
+            # Cap the annotation length, since the ACK pulse may stay high
+            # until the next frame.
+            if (self.fall_end - self.fall_start) > self.max_ack_len_samples:
+                ann_end = self.fall_start + self.max_ack_len_samples
+            else:
+                ann_end = self.fall_end
+
+            if bit:
+                # Any NACK detected in the frame is enough to consider the
+                # whole frame NACK'd.
+                self.is_nack = 1
+                self.put(self.fall_start, ann_end, self.out_ann, [3, ['NACK']])
+            else:
+                self.put(self.fall_start, ann_end, self.out_ann, [4, ['ACK']])
+
+            # After ACK bit, wait for new datagram or continue reading current
+            # one based on EOM value.
+            if self.eom or self.is_nack:
+                self.stat = Stat.WAIT_START
+                self.handle_frame(self.is_nack)
+            else:
+                self.stat = Stat.GET_BITS
+
+    def start(self):
+        self.out_ann = self.register(srd.OUTPUT_ANN)
+
+    def decode(self):
+        if not self.samplerate:
+            raise SamplerateError('Cannot decode without samplerate.')
+
+        # Wait for first falling edge.
+        self.wait({0: 'f'})
+        self.fall_end = self.samplenum
+
+        while True:
+            self.wait({0: 'r'})
+            self.rise = self.samplenum
+
+            if self.stat == Stat.WAIT_ACK:
+                # Also time out after the maximum ACK length, as no falling
+                # edge may occur until the next frame.
+                self.wait([{0: 'f'}, {'skip': self.max_ack_len_samples}])
+            else:
+                self.wait([{0: 'f'}])
+
+            self.fall_start = self.fall_end
+            self.fall_end = self.samplenum
+            self.process()
+
+            # If there was a timeout while waiting for ACK: RESYNC.
+            # Note: This is an expected situation as no new falling edge will
+            # happen until the next frame is transmitted.
+            if self.matched == (False, True):
+                self.wait({0: 'f'})
+                self.fall_end = self.samplenum
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Jorge Solla Rubiales <jorgesolla@gmail.com>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program. If not, see <http://www.gnu.org/licenses/>.
+##
+
+# Names of the fixed CEC logical addresses 0x00-0x0E; 0x0F is context
+# dependent and handled in resolve_logical_address().
+# NOTE(review): the identifier's spelling ('adresses') is kept, since
+# other code refers to it by this exact name.
+logical_adresses = [
+    'TV',
+    'Recording_1',
+    'Recording_2',
+    'Tuner_1',
+    'Playback_1',
+    'AudioSystem',
+    'Tuner2',
+    'Tuner3',
+    'Playback_2',
+    'Recording_3',
+    'Tuner_4',
+    'Playback_3',
+    'Backup_1',
+    'Backup_2',
+    'FreeUse',
+]
+
+# Opcode-value-to-name mapping. List taken from LibCEC.
+opcodes = {
+    0x82: 'ACTIVE_SOURCE',
+    0x04: 'IMAGE_VIEW_ON',
+    0x0D: 'TEXT_VIEW_ON',
+    0x9D: 'INACTIVE_SOURCE',
+    0x85: 'REQUEST_ACTIVE_SOURCE',
+    0x80: 'ROUTING_CHANGE',
+    0x81: 'ROUTING_INFORMATION',
+    0x86: 'SET_STREAM_PATH',
+    0x36: 'STANDBY',
+    0x0B: 'RECORD_OFF',
+    0x09: 'RECORD_ON',
+    0x0A: 'RECORD_STATUS',
+    0x0F: 'RECORD_TV_SCREEN',
+    0x33: 'CLEAR_ANALOGUE_TIMER',
+    0x99: 'CLEAR_DIGITAL_TIMER',
+    0xA1: 'CLEAR_EXTERNAL_TIMER',
+    0x34: 'SET_ANALOGUE_TIMER',
+    0x97: 'SET_DIGITAL_TIMER',
+    0xA2: 'SET_EXTERNAL_TIMER',
+    0x67: 'SET_TIMER_PROGRAM_TITLE',
+    0x43: 'TIMER_CLEARED_STATUS',
+    0x35: 'TIMER_STATUS',
+    0x9E: 'CEC_VERSION',
+    0x9F: 'GET_CEC_VERSION',
+    0x83: 'GIVE_PHYSICAL_ADDRESS',
+    0x91: 'GET_MENU_LANGUAGE',
+    0x84: 'REPORT_PHYSICAL_ADDRESS',
+    0x32: 'SET_MENU_LANGUAGE',
+    0x42: 'DECK_CONTROL',
+    0x1B: 'DECK_STATUS',
+    0x1A: 'GIVE_DECK_STATUS',
+    0x41: 'PLAY',
+    0x08: 'GIVE_TUNER_DEVICE_STATUS',
+    0x92: 'SELECT_ANALOGUE_SERVICE',
+    0x93: 'SELECT_DIGITAL_SERVICE',
+    0x07: 'TUNER_DEVICE_STATUS',
+    0x06: 'TUNER_STEP_DECREMENT',
+    0x05: 'TUNER_STEP_INCREMENT',
+    0x87: 'DEVICE_VENDOR_ID',
+    0x8C: 'GIVE_DEVICE_VENDOR_ID',
+    0x89: 'VENDOR_COMMAND',
+    0xA0: 'VENDOR_COMMAND_WITH_ID',
+    0x8A: 'VENDOR_REMOTE_BUTTON_DOWN',
+    0x8B: 'VENDOR_REMOTE_BUTTON_UP',
+    0x64: 'SET_OSD_STRING',
+    0x46: 'GIVE_OSD_NAME',
+    0x47: 'SET_OSD_NAME',
+    0x8D: 'MENU_REQUEST',
+    0x8E: 'MENU_STATUS',
+    0x44: 'USER_CONTROL_PRESSED',
+    0x45: 'USER_CONTROL_RELEASE',
+    0x8F: 'GIVE_DEVICE_POWER_STATUS',
+    0x90: 'REPORT_POWER_STATUS',
+    0x00: 'FEATURE_ABORT',
+    0xFF: 'ABORT',
+    0x71: 'GIVE_AUDIO_STATUS',
+    0x7D: 'GIVE_SYSTEM_AUDIO_MODE_STATUS',
+    0x7A: 'REPORT_AUDIO_STATUS',
+    0x72: 'SET_SYSTEM_AUDIO_MODE',
+    0x70: 'SYSTEM_AUDIO_MODE_REQUEST',
+    0x7E: 'SYSTEM_AUDIO_MODE_STATUS',
+    0x9A: 'SET_AUDIO_RATE',
+}
+
+def resolve_logical_address(id, is_initiator):
+    # Map a 4-bit CEC logical address to its display name. The value
+    # 0x0F is context dependent: 'Unregistered' when used as a source
+    # address, 'Broadcast' when used as a destination.
+    if not 0 <= id <= 0x0F:
+        return 'Invalid'
+    if id == 0x0F:
+        return 'Unregistered' if is_initiator else 'Broadcast'
+    return logical_adresses[id]
+
+def decode_header(header):
+    # Split the header byte into initiator (high nibble) and destination
+    # (low nibble), returning both as resolved logical address names.
+    src = (header & 0xF0) >> 4
+    dst = header & 0x0F
+    return (resolve_logical_address(src, 1), resolve_logical_address(dst, 0))
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Elias Oenal <sigrok@eliasoenal.com>
+## All rights reserved.
+##
+## Redistribution and use in source and binary forms, with or without
+## modification, are permitted provided that the following conditions are met:
+##
+## 1. Redistributions of source code must retain the above copyright notice,
+## this list of conditions and the following disclaimer.
+## 2. Redistributions in binary form must reproduce the above copyright notice,
+## this list of conditions and the following disclaimer in the documentation
+## and/or other materials provided with the distribution.
+##
+## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+## ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+## LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+## INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+## CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+## POSSIBILITY OF SUCH DAMAGE.
+##
+
+'''
+This decoder stacks on top of the 'mdio' PD and decodes the CFP 100G
+pluggable transceiver protocol.
+'''
+
+from .pd import Decoder
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Elias Oenal <sigrok@eliasoenal.com>
+## All rights reserved.
+##
+## Redistribution and use in source and binary forms, with or without
+## modification, are permitted provided that the following conditions are met:
+##
+## 1. Redistributions of source code must retain the above copyright notice,
+## this list of conditions and the following disclaimer.
+## 2. Redistributions in binary form must reproduce the above copyright notice,
+## this list of conditions and the following disclaimer in the documentation
+## and/or other materials provided with the distribution.
+##
+## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+## ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+## LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+## INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+## CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+## POSSIBILITY OF SUCH DAMAGE.
+##
+
+import sigrokdecode as srd
+
+# Module identifier values, as reported in NVR register 0x8000.
+MODULE_ID = {
+    0x00: 'Unknown or unspecified',
+    0x01: 'GBIC',
+    0x02: 'Module/connector soldered to motherboard',
+    0x03: 'SFP',
+    0x04: '300 pin XSBI',
+    0x05: 'XENPAK',
+    0x06: 'XFP',
+    0x07: 'XFF',
+    0x08: 'XFP-E',
+    0x09: 'XPAK',
+    0x0A: 'X2',
+    0x0B: 'DWDM-SFP',
+    0x0C: 'QSFP',
+    0x0D: 'QSFP+',
+    0x0E: 'CFP',
+    0x0F: 'CXP (TBD)',
+    0x11: 'CFP2',
+    0x12: 'CFP4',
+}
+
+class Decoder(srd.Decoder):
+    api_version = 3
+    id = 'cfp'
+    name = 'CFP'
+    longname = '100 Gigabit C form-factor pluggable'
+    desc = '100 Gigabit C form-factor pluggable (CFP) protocol.'
+    license = 'BSD'
+    inputs = ['mdio']
+    outputs = ['cfp']
+    annotations = (
+        ('register', 'Register'),
+        ('decode', 'Decode'),
+    )
+    annotation_rows = (
+        ('registers', 'Registers', (0,)),
+        ('decodes', 'Decodes', (1,)),
+    )
+
+    def __init__(self):
+        self.reset()
+
+    def reset(self):
+        # Stateless decoder: nothing to clear between runs.
+        pass
+
+    def start(self):
+        self.out_ann = self.register(srd.OUTPUT_ANN)
+
+    def putx(self, data):
+        # Emit an annotation spanning the current MDIO transfer.
+        self.put(self.ss, self.es, self.out_ann, data)
+
+    def decode(self, ss, es, data):
+        self.ss, self.es = ss, es
+        # Each entry describes one MDIO transfer from the stacked decoder.
+        for (clause45, clause45_addr, is_read, portad, devad, reg) in data:
+            # We only care about actual reads (for now).
+            if not is_read:
+                continue
+            addr = clause45_addr
+            # Classify the register address into its CFP register space.
+            if 0x8000 <= addr <= 0x807F:
+                self.putx([0, ['CFP NVR 1: Basic ID register', 'NVR1']])
+                if addr == 0x8000:
+                    self.putx([1, ['Module identifier: %s' % \
+                        MODULE_ID.get(reg, 'Reserved')]])
+            elif 0x8080 <= addr <= 0x80FF:
+                self.putx([0, ['CFP NVR 2: Extended ID register', 'NVR2']])
+            elif 0x8100 <= addr <= 0x817F:
+                self.putx([0, ['CFP NVR 3: Network lane specific register', 'NVR3']])
+            elif 0x8180 <= addr <= 0x81FF:
+                self.putx([0, ['CFP NVR 4', 'NVR4']])
+            elif 0x8400 <= addr <= 0x847F:
+                self.putx([0, ['Vendor NVR 1: Vendor data register', 'V-NVR1']])
+            elif 0x8480 <= addr <= 0x84FF:
+                self.putx([0, ['Vendor NVR 2: Vendor data register', 'V-NVR2']])
+            elif 0x8800 <= addr <= 0x887F:
+                self.putx([0, ['User NVR 1: User data register', 'U-NVR1']])
+            elif 0x8880 <= addr <= 0x88FF:
+                self.putx([0, ['User NVR 2: User data register', 'U-NVR2']])
+            elif 0xA000 <= addr <= 0xA07F:
+                self.putx([0, ['CFP Module VR 1: CFP Module level control and DDM register', 'Mod-VR1']])
+            elif 0xA080 <= addr <= 0xA0FF:
+                self.putx([0, ['MLG VR 1: MLG Management Interface register', 'MLG-VR1']])
def bcd2int(b):
return (b & 0x0f) + ((b >> 4) * 10)
+def bin2int(s: str):
+ return int('0b' + s, 2)
+
def bitpack(bits):
- res = 0
- for i, b in enumerate(bits):
- res |= b << i
- return res
+ return sum([b << i for i, b in enumerate(bits)])
def bitunpack(num, minbits=0):
res = []
import sigrokdecode as srd
+PIN_DATA, PIN_RESET = range(2)
+ROW_EDGE, ROW_WORD, ROW_RESET = range(3)
+
class Decoder(srd.Decoder):
api_version = 3
id = 'counter'
('word_reset', 'Word reset'),
)
annotation_rows = (
- ('edge_counts', 'Edges', (0,)),
- ('word_counts', 'Words', (1,)),
- ('word_resets', 'Word resets', (2,)),
+ ('edge_counts', 'Edges', (ROW_EDGE,)),
+ ('word_counts', 'Words', (ROW_WORD,)),
+ ('word_resets', 'Word resets', (ROW_RESET,)),
)
options = (
{'id': 'data_edge', 'desc': 'Edges to count (data)', 'default': 'any',
{'id': 'divider', 'desc': 'Count divider (word width)', 'default': 0},
{'id': 'reset_edge', 'desc': 'Edge which clears counters (reset)',
'default': 'falling', 'values': ('rising', 'falling')},
+ {'id': 'edge_off', 'desc': 'Edge counter value after start/reset', 'default': 0},
+ {'id': 'word_off', 'desc': 'Word counter value after start/reset', 'default': 0},
+ {'id': 'dead_cycles', 'desc': 'Ignore this many edges after reset', 'default': 0},
+ {'id': 'start_with_reset', 'desc': 'Assume decode starts with reset',
+ 'default': 'no', 'values': ('no', 'yes')},
)
def __init__(self):
self.reset()
def reset(self):
- self.edge_count = 0
- self.word_count = 0
- self.have_reset = None
+ pass
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
- self.edge = self.options['data_edge']
- self.divider = self.options['divider']
- if self.divider < 0:
- self.divider = 0
- def putc(self, cls, annlist):
- self.put(self.samplenum, self.samplenum, self.out_ann, [cls, annlist])
+ def putc(self, cls, ss, annlist):
+ self.put(ss, self.samplenum, self.out_ann, [cls, annlist])
def decode(self):
- condition = [{'rising': {0: 'r'},
- 'falling': {0: 'f'},
- 'any': {0: 'e'},}[self.edge]]
+ opt_edge_map = {'rising': 'r', 'falling': 'f', 'any': 'e'}
+
+ data_edge = self.options['data_edge']
+ divider = self.options['divider']
+ if divider < 0:
+ divider = 0
+ reset_edge = self.options['reset_edge']
+
+ condition = [{PIN_DATA: opt_edge_map[data_edge]}]
+ have_reset = self.has_channel(PIN_RESET)
+ if have_reset:
+ cond_reset = len(condition)
+ condition.append({PIN_RESET: opt_edge_map[reset_edge]})
+
+ edge_count = int(self.options['edge_off'])
+ edge_start = None
+ word_count = int(self.options['word_off'])
+ word_start = None
- if self.has_channel(1):
- self.have_reset = True
- condition.append({1: self.options['reset_edge'][0]})
+ if self.options['start_with_reset'] == 'yes':
+ dead_count = int(self.options['dead_cycles'])
+ else:
+ dead_count = 0
while True:
self.wait(condition)
- if self.have_reset and self.matched[1]:
- self.edge_count = 0
- self.word_count = 0
- self.putc(2, ['Word reset', 'Reset', 'Rst', 'R'])
+ now = self.samplenum
+
+ if have_reset and self.matched[cond_reset]:
+ edge_count = int(self.options['edge_off'])
+ edge_start = now
+ word_count = int(self.options['word_off'])
+ word_start = now
+ self.putc(ROW_RESET, now, ['Word reset', 'Reset', 'Rst', 'R'])
+ dead_count = int(self.options['dead_cycles'])
+ continue
+
+ if dead_count:
+ dead_count -= 1
+ edge_start = now
+ word_start = now
continue
- self.edge_count += 1
+ # Implementation note: In the absence of a RESET condition
+ # before the first data edge, any arbitrary choice of where
+ # to start the annotation is valid. One may choose to emit a
+ # narrow annotation (where ss=es), or assume that the cycle
+ # which corresponds to the counter value started at sample
+ # number 0. We decided to go with the latter here, to avoid
+ # narrow annotations (see bug #1210). None of this matters in
+ # the presence of a RESET condition in the input stream.
+ if edge_start is None:
+ edge_start = 0
+ if word_start is None:
+ word_start = 0
+
+ edge_count += 1
+ self.putc(ROW_EDGE, edge_start, ["{:d}".format(edge_count)])
+ edge_start = now
- self.putc(0, [str(self.edge_count)])
- if self.divider > 0 and (self.edge_count % self.divider) == 0:
- self.word_count += 1
- self.putc(1, [str(self.word_count)])
+ word_edge_count = edge_count - int(self.options['edge_off'])
+ if divider and (word_edge_count % divider) == 0:
+ word_count += 1
+ self.putc(ROW_WORD, word_start, ["{:d}".format(word_count)])
+ word_start = now
else:
self.tmp |= (bit << (c - 1))
if c == 14:
- s = bin(self.tmp)[2:].zfill(14)
+ s = '{:014b}'.format(self.tmp)
self.putb([1, ['Special bits: %s' % s, 'SB: %s' % s]])
elif c == 15:
s = '' if (bit == 1) else 'not '
self.tmp |= (bit << (c - 42))
if c == 44:
d = bcd2int(self.tmp)
- dn = calendar.day_name[d - 1] # day_name[0] == Monday
- self.putb([13, ['Day of week: %d (%s)' % (d, dn),
- 'DoW: %d (%s)' % (d, dn)]])
+ try:
+ dn = calendar.day_name[d - 1] # day_name[0] == Monday
+ self.putb([13, ['Day of week: %d (%s)' % (d, dn),
+ 'DoW: %d (%s)' % (d, dn)]])
+ except IndexError:
+ self.putb([19, ['Day of week: %d (%s)' % (d, 'invalid'),
+ 'DoW: %d (%s)' % (d, 'inv')]])
elif c in range(45, 49 + 1):
# Month (1-12): DCF77 bits 45-49 (BCD format).
if c == 45:
self.tmp |= (bit << (c - 45))
if c == 49:
m = bcd2int(self.tmp)
- mn = calendar.month_name[m] # month_name[1] == January
- self.putb([14, ['Month: %d (%s)' % (m, mn),
- 'Mon: %d (%s)' % (m, mn)]])
+ try:
+ mn = calendar.month_name[m] # month_name[1] == January
+ self.putb([14, ['Month: %d (%s)' % (m, mn),
+ 'Mon: %d (%s)' % (m, mn)]])
+ except IndexError:
+ self.putb([19, ['Month: %d (%s)' % (m, 'invalid'),
+ 'Mon: %d (%s)' % (m, 'inv')]])
elif c in range(50, 57 + 1):
# Year (0-99): DCF77 bits 50-57 (BCD format).
if c == 50:
self.putx([16, ['Date parity: %s' % s, 'DP: %s' % s]])
self.datebits = []
else:
- raise Exception('Invalid DCF77 bit: %d' % c)
+ self.putx([19, ['Invalid DCF77 bit: %d' % c,
+ 'Invalid bit: %d' % c, 'Inv: %d' % c]])
def decode(self):
if not self.samplerate:
elif len_high_ms in range(161, 260 + 1):
bit = 1
else:
- bit = -1 # TODO: Error?
+ bit = -1
- # There's no bit 59, make sure none is decoded.
- if bit in (0, 1) and self.bitcount in range(0, 58 + 1):
+ if bit in (0, 1):
self.handle_dcf77_bit(bit)
self.bitcount += 1
+ else:
+ self.putx([19, ['Invalid bit timing', 'Inv timing', 'Inv']])
self.state = 'WAIT FOR RISING EDGE'
self.sn = []
# Received data
self.cache = []
+ # Random read offset
+ self.offset = 0
+ # Extensions
+ self.extension = 0
+ self.ext_sn = [[]]
+ self.ext_cache = [[]]
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def decode(self, ss, es, data):
cmd, data = data
+ if cmd == 'ADDRESS WRITE' and data == 0x50:
+ self.state = 'offset'
+ self.ss = ss
+ return
+
+ if cmd == 'ADDRESS READ' and data == 0x50:
+ if self.extension > 0:
+ self.state = 'extensions'
+ s = str(self.extension)
+ t = ["Extension: " + s, "X: " + s, s]
+ else:
+ self.state = 'header'
+ t = ["EDID"]
+ self.put(ss, es, self.out_ann, [ANN_SECTIONS, t])
+ return
+
+ if cmd == 'DATA WRITE' and self.state == 'offset':
+ self.offset = data
+ self.extension = self.offset // 128
+ self.cnt = self.offset % 128
+ if self.extension > 0:
+ ext = self.extension - 1
+ l = len(self.ext_sn[ext])
+ # Truncate or extend to self.cnt.
+ self.sn = self.ext_sn[ext][0:self.cnt] + [0] * max(0, self.cnt - l)
+ self.cache = self.ext_cache[ext][0:self.cnt] + [0] * max(0, self.cnt - l)
+ else:
+ l = len(self.sn)
+ self.sn = self.sn[0:self.cnt] + [0] * max(0, self.cnt - l)
+ self.cache = self.cache[0:self.cnt] + [0] * max(0, self.cnt - l)
+ ss = self.ss if self.ss else ss
+ s = str(data)
+ t = ["Offset: " + s, "O: " + s, s]
+ self.put(ss, es, self.out_ann, [ANN_SECTIONS, t])
+ return
+
# We only care about actual data bytes that are read (for now).
if cmd != 'DATA READ':
return
self.cnt += 1
- self.sn.append([ss, es])
- self.cache.append(data)
- # debug
+ if self.extension > 0:
+ self.ext_sn[self.extension - 1].append([ss, es])
+ self.ext_cache[self.extension - 1].append(data)
+ else:
+ self.sn.append([ss, es])
+ self.cache.append(data)
- if self.state is None:
+ if self.state is None or self.state == 'header':
# Wait for the EDID header
if self.cnt >= OFF_VENDOR:
if self.cache[-8:] == EDID_HEADER:
self.put(ss, es, self.out_ann, [0, ['Checksum: %d (%s)' % (
self.cache[self.cnt-1], csstr)]])
self.state = 'extensions'
+
elif self.state == 'extensions':
- pass
+ cache = self.ext_cache[self.extension - 1]
+ sn = self.ext_sn[self.extension - 1]
+ v = cache[self.cnt - 1]
+ if self.cnt == 1:
+ if v == 2:
+ self.put(ss, es, self.out_ann, [1, ['Extensions Tag', 'Tag']])
+ else:
+ self.put(ss, es, self.out_ann, [1, ['Bad Tag']])
+ elif self.cnt == 2:
+ self.put(ss, es, self.out_ann, [1, ['Version']])
+ self.put(ss, es, self.out_ann, [0, [str(v)]])
+ elif self.cnt == 3:
+ self.put(ss, es, self.out_ann, [1, ['DTD offset']])
+ self.put(ss, es, self.out_ann, [0, [str(v)]])
+ elif self.cnt == 4:
+ self.put(ss, es, self.out_ann, [1, ['Format support | DTD count']])
+ support = "Underscan: {0}, {1} Audio, YCbCr: {2}".format(
+ "yes" if v & 0x80 else "no",
+ "Basic" if v & 0x40 else "No",
+ ["None", "422", "444", "422+444"][(v & 0x30) >> 4])
+ self.put(ss, es, self.out_ann, [0, ['{0}, DTDs: {1}'.format(support, v & 0xf)]])
+ elif self.cnt <= cache[2]:
+ if self.cnt == cache[2]:
+ self.put(sn[4][0], es, self.out_ann, [1, ['Data block collection']])
+ self.decode_data_block_collection(cache[4:], sn[4:])
+ elif (self.cnt - cache[2]) % 18 == 0:
+ n = (self.cnt - cache[2]) / 18
+ if n <= cache[3] & 0xf:
+ self.put(sn[self.cnt - 18][0], es, self.out_ann, [1, ['DTD']])
+ self.decode_descriptors(-18)
+
+ elif self.cnt == 127:
+ dtd_last = cache[2] + (cache[3] & 0xf) * 18
+ self.put(sn[dtd_last][0], es, self.out_ann, [1, ['Padding']])
+ elif self.cnt == 128:
+ checksum = sum(cache) % 256
+ self.put(ss, es, self.out_ann, [0, ['Checksum: %d (%s)' % (
+ cache[self.cnt-1], 'Wrong' if checksum else 'OK')]])
def ann_field(self, start, end, annotation):
- self.put(self.sn[start][0], self.sn[end][1],
- self.out_ann, [ANN_FIELDS, [annotation]])
+ annotation = annotation if isinstance(annotation, list) else [annotation]
+ sn = self.ext_sn[self.extension - 1] if self.extension else self.sn
+ self.put(sn[start][0], sn[end][1],
+ self.out_ann, [ANN_FIELDS, annotation])
def lookup_pnpid(self, pnpid):
pnpid_file = os.path.join(os.path.dirname(__file__), 'pnpids.txt')
datestr += 'week %d, ' % self.cache[offset]
datestr += str(1990 + self.cache[offset+1])
if datestr:
- self.ann_field(offset, offset+1, 'Manufactured ' + datestr)
+ self.ann_field(offset, offset+1, ['Manufactured ' + datestr, datestr])
def decode_basicdisplay(self, offset):
# Video input definition
self.ann_field(offset, offset + 15,
'Supported standard modes: %s' % modestr[:-2])
- def decode_detailed_timing(self, offset):
- if offset == -72 and self.have_preferred_timing:
+ def decode_detailed_timing(self, cache, sn, offset, is_first):
+ if is_first and self.have_preferred_timing:
# Only on first detailed timing descriptor
section = 'Preferred'
else:
section = 'Detailed'
section += ' timing descriptor'
- self.put(self.sn[offset][0], self.sn[offset+17][1],
+
+ self.put(sn[0][0], sn[17][1],
self.out_ann, [ANN_SECTIONS, [section]])
- pixclock = float((self.cache[offset+1] << 8) + self.cache[offset]) / 100
+ pixclock = float((cache[1] << 8) + cache[0]) / 100
self.ann_field(offset, offset+1, 'Pixel clock: %.2f MHz' % pixclock)
- horiz_active = ((self.cache[offset+4] & 0xf0) << 4) + self.cache[offset+2]
- self.ann_field(offset+2, offset+4, 'Horizontal active: %d' % horiz_active)
-
- horiz_blank = ((self.cache[offset+4] & 0x0f) << 8) + self.cache[offset+3]
- self.ann_field(offset+2, offset+4, 'Horizontal blanking: %d' % horiz_blank)
-
- vert_active = ((self.cache[offset+7] & 0xf0) << 4) + self.cache[offset+5]
- self.ann_field(offset+5, offset+7, 'Vertical active: %d' % vert_active)
-
- vert_blank = ((self.cache[offset+7] & 0x0f) << 8) + self.cache[offset+6]
- self.ann_field(offset+5, offset+7, 'Vertical blanking: %d' % vert_blank)
+ horiz_active = ((cache[4] & 0xf0) << 4) + cache[2]
+ horiz_blank = ((cache[4] & 0x0f) << 8) + cache[3]
+ self.ann_field(offset+2, offset+4, 'Horizontal active: %d, blanking: %d' % (horiz_active, horiz_blank))
- horiz_sync_off = ((self.cache[offset+11] & 0xc0) << 2) + self.cache[offset+8]
- self.ann_field(offset+8, offset+11, 'Horizontal sync offset: %d' % horiz_sync_off)
+ vert_active = ((cache[7] & 0xf0) << 4) + cache[5]
+ vert_blank = ((cache[7] & 0x0f) << 8) + cache[6]
+ self.ann_field(offset+5, offset+7, 'Vertical active: %d, blanking: %d' % (vert_active, vert_blank))
- horiz_sync_pw = ((self.cache[offset+11] & 0x30) << 4) + self.cache[offset+9]
- self.ann_field(offset+8, offset+11, 'Horizontal sync pulse width: %d' % horiz_sync_pw)
+ horiz_sync_off = ((cache[11] & 0xc0) << 2) + cache[8]
+ horiz_sync_pw = ((cache[11] & 0x30) << 4) + cache[9]
+ vert_sync_off = ((cache[11] & 0x0c) << 2) + ((cache[10] & 0xf0) >> 4)
+ vert_sync_pw = ((cache[11] & 0x03) << 4) + (cache[10] & 0x0f)
- vert_sync_off = ((self.cache[offset+11] & 0x0c) << 2) \
- + ((self.cache[offset+10] & 0xf0) >> 4)
- self.ann_field(offset+8, offset+11, 'Vertical sync offset: %d' % vert_sync_off)
+ syncs = (horiz_sync_off, horiz_sync_pw, vert_sync_off, vert_sync_pw)
+ self.ann_field(offset+8, offset+11, [
+ 'Horizontal sync offset: %d, pulse width: %d, Vertical sync offset: %d, pulse width: %d' % syncs,
+ 'HSync off: %d, pw: %d, VSync off: %d, pw: %d' % syncs])
- vert_sync_pw = ((self.cache[offset+11] & 0x03) << 4) \
- + (self.cache[offset+10] & 0x0f)
- self.ann_field(offset+8, offset+11, 'Vertical sync pulse width: %d' % vert_sync_pw)
-
- horiz_size = ((self.cache[offset+14] & 0xf0) << 4) + self.cache[offset+12]
- vert_size = ((self.cache[offset+14] & 0x0f) << 8) + self.cache[offset+13]
+ horiz_size = ((cache[14] & 0xf0) << 4) + cache[12]
+ vert_size = ((cache[14] & 0x0f) << 8) + cache[13]
self.ann_field(offset+12, offset+14, 'Physical size: %dx%dmm' % (horiz_size, vert_size))
- horiz_border = self.cache[offset+15]
+ horiz_border = cache[15]
self.ann_field(offset+15, offset+15, 'Horizontal border: %d pixels' % horiz_border)
- vert_border = self.cache[offset+16]
+ vert_border = cache[16]
self.ann_field(offset+16, offset+16, 'Vertical border: %d lines' % vert_border)
features = 'Flags: '
- if self.cache[offset+17] & 0x80:
+ if cache[17] & 0x80:
features += 'interlaced, '
- stereo = (self.cache[offset+17] & 0x60) >> 5
+ stereo = (cache[17] & 0x60) >> 5
if stereo:
- if self.cache[offset+17] & 0x01:
+ if cache[17] & 0x01:
features += '2-way interleaved stereo ('
features += ['right image on even lines',
'left image on even lines',
features += ['right image on sync=1', 'left image on sync=1',
'4-way interleaved'][stereo-1]
features += '), '
- sync = (self.cache[offset+17] & 0x18) >> 3
- sync2 = (self.cache[offset+17] & 0x06) >> 1
+ sync = (cache[17] & 0x18) >> 3
+ sync2 = (cache[17] & 0x06) >> 1
posneg = ['negative', 'positive']
features += 'sync type '
if sync == 0x00:
features += ', '
self.ann_field(offset+17, offset+17, features[:-2])
- def decode_descriptor(self, offset):
- tag = self.cache[offset+3]
+ def decode_descriptor(self, cache, offset):
+ tag = cache[3]
+ self.ann_field(offset, offset+1, "Flag")
+ self.ann_field(offset+2, offset+2, "Flag (reserved)")
+ self.ann_field(offset+3, offset+3, "Tag: {0:X}".format(tag))
+ self.ann_field(offset+4, offset+4, "Flag")
+
+ sn = self.ext_sn[self.extension - 1] if self.extension else self.sn
+
if tag == 0xff:
# Monitor serial number
- self.put(self.sn[offset][0], self.sn[offset+17][1], self.out_ann,
+ self.put(sn[offset][0], sn[offset+17][1], self.out_ann,
[ANN_SECTIONS, ['Serial number']])
- text = bytes(self.cache[offset+5:][:13]).decode(encoding='cp437', errors='replace')
- self.ann_field(offset, offset+17, text.strip())
+ text = bytes(cache[5:][:13]).decode(encoding='cp437', errors='replace')
+ self.ann_field(offset+5, offset+17, text.strip())
elif tag == 0xfe:
# Text
- self.put(self.sn[offset][0], self.sn[offset+17][1], self.out_ann,
+ self.put(sn[offset][0], sn[offset+17][1], self.out_ann,
[ANN_SECTIONS, ['Text']])
- text = bytes(self.cache[offset+5:][:13]).decode(encoding='cp437', errors='replace')
- self.ann_field(offset, offset+17, text.strip())
+ text = bytes(cache[5:][:13]).decode(encoding='cp437', errors='replace')
+ self.ann_field(offset+5, offset+17, text.strip())
elif tag == 0xfc:
# Monitor name
- self.put(self.sn[offset][0], self.sn[offset+17][1], self.out_ann,
+ self.put(sn[offset][0], sn[offset+17][1], self.out_ann,
[ANN_SECTIONS, ['Monitor name']])
- text = bytes(self.cache[offset+5:][:13]).decode(encoding='cp437', errors='replace')
- self.ann_field(offset, offset+17, text.strip())
+ text = bytes(cache[5:][:13]).decode(encoding='cp437', errors='replace')
+ self.ann_field(offset+5, offset+17, text.strip())
elif tag == 0xfd:
# Monitor range limits
- self.put(self.sn[offset][0], self.sn[offset+17][1], self.out_ann,
+ self.put(sn[offset][0], sn[offset+17][1], self.out_ann,
[ANN_SECTIONS, ['Monitor range limits']])
- self.ann_field(offset+5, offset+5, 'Minimum vertical rate: %dHz' %
- self.cache[offset+5])
- self.ann_field(offset+6, offset+6, 'Maximum vertical rate: %dHz' %
- self.cache[offset+6])
- self.ann_field(offset+7, offset+7, 'Minimum horizontal rate: %dkHz' %
- self.cache[offset+7])
- self.ann_field(offset+8, offset+8, 'Maximum horizontal rate: %dkHz' %
- self.cache[offset+8])
- self.ann_field(offset+9, offset+9, 'Maximum pixel clock: %dMHz' %
- (self.cache[offset+9] * 10))
- if self.cache[offset+10] == 0x02:
- # Secondary GTF curve supported
- self.ann_field(offset+10, offset+17, 'Secondary timing formula supported')
+ self.ann_field(offset+5, offset+5, [
+ 'Minimum vertical rate: {0}Hz'.format(cache[5]),
+ 'VSync >= {0}Hz'.format(cache[5])])
+ self.ann_field(offset+6, offset+6, [
+ 'Maximum vertical rate: {0}Hz'.format(cache[6]),
+ 'VSync <= {0}Hz'.format(cache[6])])
+ self.ann_field(offset+7, offset+7, [
+ 'Minimum horizontal rate: {0}kHz'.format(cache[7]),
+ 'HSync >= {0}kHz'.format(cache[7])])
+ self.ann_field(offset+8, offset+8, [
+ 'Maximum horizontal rate: {0}kHz'.format(cache[8]),
+ 'HSync <= {0}kHz'.format(cache[8])])
+ self.ann_field(offset+9, offset+9, [
+ 'Maximum pixel clock: {0}MHz'.format(cache[9] * 10),
+ 'PixClk <= {0}MHz'.format(cache[9] * 10)])
+ if cache[10] == 0x02:
+ self.ann_field(offset+10, offset+10, ['Secondary timing formula supported', '2nd GTF: yes'])
+ self.ann_field(offset+11, offset+17, ['GTF'])
+ else:
+ self.ann_field(offset+10, offset+10, ['Secondary timing formula unsupported', '2nd GTF: no'])
+ self.ann_field(offset+11, offset+17, ['Padding'])
elif tag == 0xfb:
# Additional color point data
- self.put(self.sn[offset][0], self.sn[offset+17][1], self.out_ann,
+ self.put(sn[offset][0], sn[offset+17][1], self.out_ann,
[ANN_SECTIONS, ['Additional color point data']])
elif tag == 0xfa:
# Additional standard timing definitions
- self.put(self.sn[offset][0], self.sn[offset+17][1], self.out_ann,
+ self.put(sn[offset][0], sn[offset+17][1], self.out_ann,
[ANN_SECTIONS, ['Additional standard timing definitions']])
else:
- self.put(self.sn[offset][0], self.sn[offset+17][1], self.out_ann,
+ self.put(sn[offset][0], sn[offset+17][1], self.out_ann,
[ANN_SECTIONS, ['Unknown descriptor']])
def decode_descriptors(self, offset):
# 4 consecutive 18-byte descriptor blocks
+ cache = self.ext_cache[self.extension - 1] if self.extension else self.cache
+ sn = self.ext_sn[self.extension - 1] if self.extension else self.sn
+
for i in range(offset, 0, 18):
- if self.cache[i] != 0 and self.cache[i+1] != 0:
- self.decode_detailed_timing(i)
+ if cache[i] != 0 or cache[i+1] != 0:
+ self.decode_detailed_timing(cache[i:], sn[i:], i, i == offset)
+ else:
+ if cache[i+2] == 0 or cache[i+4] == 0:
+ self.decode_descriptor(cache[i:], i)
+
+ def decode_data_block(self, tag, cache, sn):
+ codes = { 0: ['0: Reserved'],
+ 1: ['1: Audio Data Block', 'Audio'],
+ 2: ['2: Video Data Block', 'Video'],
+ 3: ['3: Vendor Specific Data Block', 'VSDB'],
+                  4: ['4: Speaker Allocation Data Block', 'SADB'],
+ 5: ['5: VESA DTC Data Block', 'DTC'],
+ 6: ['6: Reserved'],
+ 7: ['7: Extended', 'Ext'] }
+ ext_codes = { 0: [ '0: Video Capability Data Block', 'VCDB'],
+ 1: [ '1: Vendor Specific Video Data Block', 'VSVDB'],
+ 17: ['17: Vendor Specific Audio Data Block', 'VSADB'], }
+ if tag < 7:
+ code = codes[tag]
+ ext_len = 0
+ if tag == 1:
+ aformats = { 1: '1 (LPCM)' }
+ rates = [ '192', '176', '96', '88', '48', '44', '32' ]
+
+ aformat = cache[1] >> 3
+ sup_rates = [ i for i in range(0, 8) if (1 << i) & cache[2] ]
+
+ data = "Format: {0} Channels: {1}".format(
+ aformats.get(aformat, aformat), (cache[1] & 0x7) + 1)
+ data += " Rates: " + " ".join(rates[6 - i] for i in sup_rates)
+ data += " Extra: [{0:02X}]".format(cache[3])
+
+        elif tag == 2:
+ data = "VIC: "
+ data += ", ".join("{0}{1}".format(v & 0x7f,
+ ['', ' (Native)'][v >> 7])
+ for v in cache[1:])
+
+        elif tag == 3:
+ ouis = { b'\x00\x0c\x03': 'HDMI Licensing, LLC' }
+ oui = bytes(cache[3:0:-1])
+ ouis = ouis.get(oui, None)
+ data = "OUI: " + " ".join('{0:02X}'.format(x) for x in oui)
+ data += " ({0})".format(ouis) if ouis else ""
+ data += ", PhyAddr: {0}.{1}.{2}.{3}".format(
+ cache[4] >> 4, cache[4] & 0xf, cache[5] >> 4, cache[5] & 0xf)
+ data += ", [" + " ".join('{0:02X}'.format(x) for x in cache[6:]) + "]"
+
+        elif tag == 4:
+ speakers = [ 'FL/FR', 'LFE', 'FC', 'RL/RR',
+ 'RC', 'FLC/FRC', 'RLC/RRC', 'FLW/FRW',
+ 'FLH/FRH', 'TC', 'FCH' ]
+ sup_speakers = cache[1] + (cache[2] << 8)
+ sup_speakers = [ i for i in range(0, 8) if (1 << i) & sup_speakers ]
+ data = "Speakers: " + " ".join(speakers[i] for i in sup_speakers)
+
else:
- if self.cache[i+2] == 0 or self.cache[i+4] == 0:
- self.decode_descriptor(i)
+ data = " ".join('{0:02X}'.format(x) for x in cache[1:])
+
+ else:
+ # Extended tags
+ ext_len = 1
+ ext_code = ext_codes.get(cache[1], ['Unknown', '?'])
+ code = zip(codes[7], [", ", ": "], ext_code)
+ code = [ "".join(x) for x in code ]
+ data = " ".join('{0:02X}'.format(x) for x in cache[2:])
+
+ self.put(sn[0][0], sn[0 + ext_len][1], self.out_ann,
+ [ANN_FIELDS, code])
+ self.put(sn[1 + ext_len][0], sn[len(cache) - 1][1], self.out_ann,
+ [ANN_FIELDS, [data]])
+
+ def decode_data_block_collection(self, cache, sn):
+ offset = 0
+ while offset < len(cache):
+            length = (cache[offset] & 0x1f) + 1
+ tag = cache[offset] >> 5
+ self.decode_data_block(tag, cache[offset:offset + length], sn[offset:])
+ offset += length
'max_speed': 400,
},
+ # ON Semiconductor
+ 'onsemi_cat24c256': {
+ 'vendor': 'ON Semiconductor',
+ 'model': 'CAT24C256',
+ 'size': 32 * 1024,
+ 'page_size': 64,
+ 'page_wraparound': True,
+ 'addr_bytes': 2,
+ 'addr_pins': 3,
+ 'max_speed': 1000,
+ },
+ 'onsemi_cat24m01': {
+ 'vendor': 'ON Semiconductor',
+ 'model': 'CAT24M01',
+ 'size': 128 * 1024,
+ 'page_size': 256,
+ 'page_wraparound': True,
+ 'addr_bytes': 2,
+ 'addr_pins': 2, # Pin A0 not connected
+ 'max_speed': 1000,
+ },
+
# Siemens
'siemens_slx_24c01': {
'vendor': 'Siemens',
self.putx([jtag_states.index(self.oldstate), [self.oldstate]])
self.putp(['NEW STATE', self.state])
- # Upon SHIFT-IR/SHIFT-DR collect the current TDI/TDO values.
- if self.state.startswith('SHIFT-'):
+ # Upon SHIFT-*/EXIT1-* collect the current TDI/TDO values.
+ if self.oldstate.startswith('SHIFT-') or \
+ self.oldstate.startswith('EXIT1-'):
if self.first_bit:
self.ss_bitstring = self.samplenum
self.first_bit = False
self.bits_samplenums_tdi.insert(0, [self.samplenum, -1])
self.bits_samplenums_tdo.insert(0, [self.samplenum, -1])
- # Output all TDI/TDO bits if we just switched from SHIFT-* to EXIT1-*.
- if self.oldstate.startswith('SHIFT-') and \
- self.state.startswith('EXIT1-'):
+ # Output all TDI/TDO bits if we just switched to UPDATE-*.
+ if self.state.startswith('UPDATE-'):
self.es_bitstring = self.samplenum
t = self.state[-2:] + ' TDI'
- b = ''.join(map(str, self.bits_tdi))
- h = ' (0x%x' % int('0b' + b, 2) + ')'
- s = t + ': ' + b + h + ', ' + str(len(self.bits_tdi)) + ' bits'
+ b = ''.join(map(str, self.bits_tdi[1:]))
+ h = ' (0x%x' % int('0b0' + b, 2) + ')'
+ s = t + ': ' + b + h + ', ' + str(len(self.bits_tdi[1:])) + ' bits'
self.putx_bs([18, [s]])
- self.bits_samplenums_tdi[0][1] = self.samplenum # ES of last bit.
- self.putp_bs([t, [b, self.bits_samplenums_tdi]])
- self.putx([16, [str(self.bits_tdi[0])]]) # Last bit.
+ self.putp_bs([t, [b, self.bits_samplenums_tdi[1:]]])
self.bits_tdi = []
self.bits_samplenums_tdi = []
t = self.state[-2:] + ' TDO'
- b = ''.join(map(str, self.bits_tdo))
- h = ' (0x%x' % int('0b' + b, 2) + ')'
- s = t + ': ' + b + h + ', ' + str(len(self.bits_tdo)) + ' bits'
+ b = ''.join(map(str, self.bits_tdo[1:]))
+ h = ' (0x%x' % int('0b0' + b, 2) + ')'
+ s = t + ': ' + b + h + ', ' + str(len(self.bits_tdo[1:])) + ' bits'
self.putx_bs([19, [s]])
- self.bits_samplenums_tdo[0][1] = self.samplenum # ES of last bit.
- self.putp_bs([t, [b, self.bits_samplenums_tdo]])
- self.putx([17, [str(self.bits_tdo[0])]]) # Last bit.
+ self.putp_bs([t, [b, self.bits_samplenums_tdo[1:]]])
self.bits_tdo = []
self.bits_samplenums_tdo = []
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Vladislav Ivanov <vlad.ivanov@lab-systems.ru>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+'''
+This decoder stacks on top of the 'jtag' PD and decodes JTAG data specific
+to the MIPS EJTAG protocol.
+'''
+
+from .pd import Decoder
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Vladislav Ivanov <vlad.ivanov@lab-systems.ru>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+import sigrokdecode as srd
+from common.srdhelper import bin2int
+
+class Instruction(object):
+ IDCODE = 0x01
+ IMPCODE = 0x03
+ ADDRESS = 0x08
+ DATA = 0x09
+ CONTROL = 0x0A
+ ALL = 0x0B
+ EJTAGBOOT = 0x0C
+ NORMALBOOT = 0x0D
+ FASTDATA = 0x0E
+ TCBCONTROLA = 0x10
+ TCBCONTROLB = 0x11
+ TCBDATA = 0x12
+ TCBCONTROLC = 0x13
+ PCSAMPLE = 0x14
+ TCBCONTROLD = 0x15
+ TCBCONTROLE = 0x16
+
+class State(object):
+ RESET = 0
+ DEVICE_ID = 1
+ IMPLEMENTATION = 2
+ DATA = 3
+ ADDRESS = 4
+ CONTROL = 5
+ FASTDATA = 6
+ PC_SAMPLE = 7
+ BYPASS = 8
+
+class ControlReg(object):
+ PRACC = (1 << 18)
+ PRNW = (1 << 19)
+
+class Ann(object):
+ INSTRUCTION = 0
+ REGISTER = 1
+ CONTROL_FIELD_IN = 10
+ CONTROL_FIELD_OUT = 11
+ PRACC = 12
+
+ejtag_insn = {
+ 0x00: ['Free', 'Boundary scan'],
+ 0x01: ['IDCODE', 'Select Device Identification (ID) register'],
+ 0x02: ['Free', 'Boundary scan'],
+ 0x03: ['IMPCODE', 'Select Implementation register'],
+ 0x08: ['ADDRESS', 'Select Address register'],
+ 0x09: ['DATA', 'Select Data register'],
+ 0x0A: ['CONTROL', 'Select EJTAG Control register'],
+ 0x0B: ['ALL', 'Select the Address, Data and EJTAG Control registers'],
+ 0x0C: ['EJTAGBOOT', 'Fetch code from the debug exception vector after reset'],
+ 0x0D: ['NORMALBOOT', 'Execute the reset handler after reset'],
+ 0x0E: ['FASTDATA', 'Select the Data and Fastdata registers'],
+ 0x0F: ['Reserved', 'Reserved'],
+ 0x10: ['TCBCONTROLA', 'Select the control register TCBTraceControl'],
+    0x11: ['TCBCONTROLB', 'Select trace control block register B'],
+ 0x12: ['TCBDATA', 'Access the registers specified by TCBCONTROLB'],
+ 0x13: ['TCBCONTROLC', 'Select trace control block register C'],
+ 0x14: ['PCSAMPLE', 'Select the PCsample register'],
+ 0x15: ['TCBCONTROLD', 'Select trace control block register D'],
+ 0x16: ['TCBCONTROLE', 'Select trace control block register E'],
+ 0x17: ['FDC', 'Select Fast Debug Channel'],
+ 0x1C: ['Free', 'Boundary scan'],
+}
+
+ejtag_reg = {
+ 0x00: 'RESET',
+ 0x01: 'DEVICE_ID',
+ 0x02: 'IMPLEMENTATION',
+ 0x03: 'DATA',
+ 0x04: 'ADDRESS',
+ 0x05: 'CONTROL',
+ 0x06: 'FASTDATA',
+ 0x07: 'PC_SAMPLE',
+ 0x08: 'BYPASS',
+}
+
+ejtag_control_reg = [
+ [31, 31, 'Rocc', [
+ # Read
+        ['No reset occurred', 'Reset occurred'],
+ # Write
+ ['Acknowledge reset', 'No effect'],
+ ]],
+ [30, 29, 'Psz', [
+ ['Access: byte', 'Access: halfword', 'Access: word', 'Access: triple'],
+ ]],
+ [23, 23, 'VPED', [
+ ['VPE disabled', 'VPE enabled'],
+ ]],
+ [22, 22, 'Doze', [
+ ['Processor is not in low-power mode', 'Processor is in low-power mode'],
+ ]],
+ [21, 21, 'Halt', [
+ ['Internal system bus clock is running', 'Internal system bus clock is stopped'],
+ ]],
+ [20, 20, 'Per Rst', [
+ ['No peripheral reset applied', 'Peripheral reset applied'],
+ ['Deassert peripheral reset', 'Assert peripheral reset'],
+ ]],
+ [19, 19, 'PRn W', [
+ ['Read processor access', 'Write processor access'],
+ ]],
+ [18, 18, 'Pr Acc', [
+ ['No pending processor access', 'Pending processor access'],
+ ['Finish processor access', 'Don\'t finish processor access'],
+ ]],
+ [16, 16, 'Pr Rst', [
+ ['No processor reset applied', 'Processor reset applied'],
+ ['Deassert processor reset', 'Assert system reset'],
+ ]],
+ [15, 15, 'Prob En', [
+ ['Probe will not serve processor accesses', 'Probe will service processor accesses'],
+ ]],
+ [14, 14, 'Prob Trap', [
+ ['Default location', 'DMSEG fetch'],
+ ['Set to default location', 'Set to DMSEG fetch'],
+ ]],
+ [13, 13, 'ISA On Debug', [
+ ['MIPS32/MIPS64 ISA', 'microMIPS ISA'],
+ ['Set to MIPS32/MIPS64 ISA', 'Set to microMIPS ISA'],
+ ]],
+ [12, 12, 'EJTAG Brk', [
+ ['No pending debug interrupt', 'Pending debug interrupt'],
+ ['No effect', 'Request debug interrupt'],
+ ]],
+ [3, 3, 'DM', [
+ ['Not in debug mode', 'In debug mode'],
+ ]],
+]
+
+ejtag_state_map = {
+ Instruction.IDCODE: State.DEVICE_ID,
+ Instruction.IMPCODE: State.IMPLEMENTATION,
+ Instruction.DATA: State.DATA,
+ Instruction.ADDRESS: State.ADDRESS,
+ Instruction.CONTROL: State.CONTROL,
+ Instruction.FASTDATA: State.FASTDATA,
+}
+
+class RegData(object):
+ def __init__(self):
+ self.ss = None
+ self.es = None
+ self.data = None
+
+class LastData(object):
+ def __init__(self):
+ self.data_in = RegData()
+ self.data_out = RegData()
+
+class PraccState(object):
+ def reset(self):
+ self.address_in = None
+ self.address_out = None
+ self.data_in = None
+ self.data_out = None
+ self.write = False
+ self.ss = 0
+ self.es = 0
+
+ def __init__(self):
+ self.reset()
+
+regs_items = {
+ 'ann': tuple([tuple([s.lower(), s]) for s in list(ejtag_reg.values())]),
+ 'rows_range': tuple(range(1, 1 + 9)),
+}
+
+class Decoder(srd.Decoder):
+ api_version = 3
+ id = 'jtag_ejtag'
+ name = 'JTAG / EJTAG (MIPS)'
+ longname = 'Joint Test Action Group / EJTAG (MIPS)'
+ desc = 'MIPS EJTAG protocol.'
+ license = 'gplv2+'
+ inputs = ['jtag']
+ outputs = ['jtag_ejtag']
+ annotations = (
+ ('instruction', 'Instruction'),
+ ) + regs_items['ann'] + (
+ ('control_field_in', 'Control field in'),
+ ('control_field_out', 'Control field out'),
+ ('pracc', 'PrAcc'),
+ )
+ annotation_rows = (
+ ('instructions', 'Instructions', (0,)),
+ ('regs', 'Registers', regs_items['rows_range']),
+ ('control_fields_in', 'Control fields in', (10,)),
+ ('control_fields_out', 'Control fields out', (11,)),
+ ('pracc', 'PrAcc', (12,)),
+ )
+
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.state = State.RESET
+ self.pracc_state = PraccState()
+
+ def put_current(self, data):
+ self.put(self.ss, self.es, self.out_ann, data)
+
+ def put_at(self, ss: int, es: int, data):
+ self.put(ss, es, self.out_ann, data);
+
+ def start(self):
+ self.out_ann = self.register(srd.OUTPUT_ANN)
+
+ def select_reg(self, ir_value: int):
+ self.state = ejtag_state_map.get(ir_value, State.RESET)
+
+ def parse_pracc(self):
+ control_in = bin2int(self.last_data['in']['data'][0])
+ control_out = bin2int(self.last_data['out']['data'][0])
+
+ # Check if JTAG master acknowledges a pending PrAcc.
+ if not ((not (control_in & ControlReg.PRACC)) and \
+ (control_out & ControlReg.PRACC)):
+ return
+
+ ss, es = self.pracc_state.ss, self.pracc_state.es
+ pracc_write = (control_out & ControlReg.PRNW) != 0
+
+ s = 'PrAcc: '
+ s += 'Store' if pracc_write else 'Load/Fetch'
+
+ if pracc_write:
+ if self.pracc_state.address_out != None:
+ s += ', A:' + ' 0x{:08X}'.format(self.pracc_state.address_out)
+ if self.pracc_state.data_out != None:
+ s += ', D:' + ' 0x{:08X}'.format(self.pracc_state.data_out)
+ else:
+ if self.pracc_state.address_out != None:
+ s += ', A:' + ' 0x{:08X}'.format(self.pracc_state.address_out)
+ if self.pracc_state.data_in != None:
+ s += ', D:' + ' 0x{:08X}'.format(self.pracc_state.data_in)
+
+ self.pracc_state.reset()
+
+ self.put_at(ss, es, [Ann.PRACC, [s]])
+
+ def parse_control_reg(self, ann):
+ reg_write = ann == Ann.CONTROL_FIELD_IN
+ control_bit_positions = []
+ data_select = 'in' if (reg_write) else 'out'
+
+ control_bit_positions = self.last_data[data_select]['data'][1]
+ control_data = self.last_data[data_select]['data'][0]
+
+ # Annotate control register fields.
+ for field in ejtag_control_reg:
+ start_bit = 31 - field[1]
+ end_bit = 31 - field[0]
+ comment = field[2]
+ value_descriptions = []
+
+ if reg_write:
+ if len(field[3]) < 2:
+ continue
+ value_descriptions = field[3][1]
+ else:
+ value_descriptions = field[3][0]
+
+ ss = control_bit_positions[start_bit][0]
+ es = control_bit_positions[end_bit][1]
+
+ value_str = control_data[end_bit : start_bit + 1]
+ value_index = bin2int(value_str)
+
+ short_desc = comment + ': ' + value_str
+ long_desc = value_descriptions[value_index] if len(value_descriptions) > value_index else '?'
+
+ self.put_at(ss, es, [ann, [long_desc, short_desc]])
+
+ def check_last_data(self):
+ if not hasattr(self, 'last_data'):
+ self.last_data = {'in': {}, 'out': {}}
+
+ def handle_fastdata(self, val, ann):
+ spracc_write_desc = {
+ 0: ['0', 'SPrAcc: 0', 'Request completion of Fastdata access'],
+ 1: ['1', 'SPrAcc: 1', 'No effect'],
+ }
+ spracc_read_desc = {
+ 0: ['0', 'SPrAcc: 0', 'Fastdata access failure'],
+ 1: ['1', 'SPrAcc: 1', 'Successful completion of Fastdata access'],
+ }
+
+ bitstring = val[0]
+ bit_sample_pos = val[1]
+ fastdata_state = bitstring[32]
+ data = bin2int(bitstring[0:32])
+
+ fastdata_bit_pos = bit_sample_pos[32]
+ data_pos = [bit_sample_pos[31][0], bit_sample_pos[0][1]]
+
+ ss_fastdata, es_fastdata = fastdata_bit_pos
+ ss_data, es_data = data_pos
+
+ display_data = [ann, ['0x{:08X}'.format(data)]]
+ spracc_display_data = []
+
+ if ann == Ann.CONTROL_FIELD_IN:
+ spracc_display_data = [ann, spracc_write_desc[int(fastdata_state)]]
+ elif ann == Ann.CONTROL_FIELD_OUT:
+ spracc_display_data = [ann, spracc_read_desc[int(fastdata_state)]]
+
+ self.put_at(ss_fastdata, es_fastdata, spracc_display_data)
+ self.put_at(ss_data, es_data, display_data)
+
+ def handle_dr_tdi(self, val):
+ value = bin2int(val[0])
+ self.check_last_data()
+ self.last_data['in'] = {'ss': self.ss, 'es': self.es, 'data': val}
+
+ self.pracc_state.ss, self.pracc_state.es = self.ss, self.es
+
+ if self.state == State.ADDRESS:
+ self.pracc_state.address_in = value
+ elif self.state == State.DATA:
+ self.pracc_state.data_in = value
+ elif self.state == State.FASTDATA:
+ self.handle_fastdata(val, Ann.CONTROL_FIELD_IN)
+
+ def handle_dr_tdo(self, val):
+ value = bin2int(val[0])
+ self.check_last_data()
+ self.last_data['out'] = {'ss': self.ss, 'es': self.es, 'data': val}
+ if self.state == State.ADDRESS:
+ self.pracc_state.address_out = value
+ elif self.state == State.DATA:
+ self.pracc_state.data_out = value
+ elif self.state == State.FASTDATA:
+ self.handle_fastdata(val, Ann.CONTROL_FIELD_OUT)
+
+ def handle_ir_tdi(self, val):
+ code = bin2int(val[0])
+ hex = '0x{:02X}'.format(code)
+ if code in ejtag_insn:
+ # Format instruction name.
+ insn = ejtag_insn[code]
+ s_short = insn[0]
+ s_long = insn[0] + ': ' + insn[1] + ' (' + hex + ')'
+ # Display it and select data register.
+ self.put_current([Ann.INSTRUCTION, [s_long, s_short]])
+ else:
+ self.put_current([Ann.INSTRUCTION, [hex, 'IR TDI ({})'.format(hex)]])
+ self.select_reg(code)
+
+ def handle_new_state(self, new_state):
+ if new_state != 'UPDATE-DR' or not hasattr(self, 'last_data'):
+ return
+
+ if self.state == State.RESET:
+ return
+
+ reg_name = ejtag_reg[self.state]
+ ann_index = Ann.REGISTER + self.state
+ display_data = [ann_index, [reg_name]]
+ self.put_at(self.last_data['in']['ss'], self.last_data['in']['es'], display_data)
+
+ if self.state == State.CONTROL:
+ control_bit_positions = self.last_data['in']['data'][1]
+ bit_count = len(control_bit_positions)
+ # Check if control register data length is correct.
+ if bit_count != 32:
+ error_display = [Ann.REGISTER, ['Error: length != 32']]
+ self.put_at(self.last_data['in']['ss'], self.last_data['in']['es'], error_display)
+ return
+ self.parse_control_reg(Ann.CONTROL_FIELD_IN)
+ self.parse_control_reg(Ann.CONTROL_FIELD_OUT)
+ self.parse_pracc()
+
+ def decode(self, ss: int, es: int, data):
+ cmd, val = data
+ self.ss, self.es = ss, es
+
+ if cmd == 'IR TDI':
+ self.handle_ir_tdi(val)
+ elif cmd == 'DR TDI':
+ self.handle_dr_tdi(val)
+ elif cmd == 'DR TDO':
+ self.handle_dr_tdo(val)
+ elif cmd == 'NEW STATE':
+ self.handle_new_state(val)
'1000': ['ABORT', 35], # Abort register # TODO: 32 bits? Datasheet typo?
}
+# Boundary scan data registers (in IR[8:4]) and their sizes (in bits)
+bs_ir = {
+ '11111': ['BYPASS', 1], # Bypass register
+}
+
# ARM Cortex-M3 r1p1-01rel0 ID code
cm3_idcode = 0x3ba00477
self.putx([0, ['BYPASS: ' + bits]])
def handle_reg_idcode(self, cmd, bits):
- # IDCODE is a read-only register which is always accessible.
- # IR == IDCODE: The 32bit device ID code is shifted out via DR next.
-
- id_hex, manuf, ver, part = decode_device_id_code(bits[:-1])
- cc = '0x%x' % int('0b' + bits[:-1][-12:-8], 2)
- ic = '0x%x' % int('0b' + bits[:-1][-7:-1], 2)
-
- self.putf(0, 0, [1, ['Reserved (BS TAP)', 'BS', 'B']])
- self.putf(1, 1, [1, ['Reserved', 'Res', 'R']])
- self.putf(9, 12, [0, ['Continuation code: %s' % cc, 'CC', 'C']])
- self.putf(2, 8, [0, ['Identity code: %s' % ic, 'IC', 'I']])
- self.putf(2, 12, [1, ['Manufacturer: %s' % manuf, 'Manuf', 'M']])
- self.putf(13, 28, [1, ['Part: %s' % part, 'Part', 'P']])
- self.putf(29, 32, [1, ['Version: %s' % ver, 'Version', 'V']])
-
- self.ss = self.samplenums[1][0]
+ bits = bits[1:]
+
+ id_hex, manuf, ver, part = decode_device_id_code(bits)
+ cc = '0x%x' % int('0b' + bits[-12:-8], 2)
+ ic = '0x%x' % int('0b' + bits[-7:-1], 2)
+
+ self.putf(0, 0, [1, ['Reserved', 'Res', 'R']])
+ self.putf(8, 11, [0, ['Continuation code: %s' % cc, 'CC', 'C']])
+ self.putf(1, 7, [0, ['Identity code: %s' % ic, 'IC', 'I']])
+ self.putf(1, 11, [1, ['Manufacturer: %s' % manuf, 'Manuf', 'M']])
+ self.putf(12, 27, [1, ['Part: %s' % part, 'Part', 'P']])
+ self.putf(28, 31, [1, ['Version: %s' % ver, 'Version', 'V']])
+ self.putf(32, 32, [1, ['BYPASS (BS TAP)', 'BS', 'B']])
+
self.putx([2, ['IDCODE: %s (%s: %s/%s)' % \
- decode_device_id_code(bits[:-1])]])
+ decode_device_id_code(bits)]])
def handle_reg_dpacc(self, cmd, bits):
- bits = bits[:-1]
+ bits = bits[1:]
s = data_in('DPACC', bits) if (cmd == 'DR TDI') else data_out(bits)
self.putx([2, [s]])
def handle_reg_apacc(self, cmd, bits):
- bits = bits[:-1]
+ bits = bits[1:]
s = data_in('APACC', bits) if (cmd == 'DR TDI') else data_out(bits)
self.putx([2, [s]])
def handle_reg_abort(self, cmd, bits):
- bits = bits[:-1]
+ bits = bits[1:]
# Bits[31:1]: reserved. Bit[0]: DAPABORT.
a = '' if (bits[0] == '1') else 'No '
s = 'DAPABORT = %s: %sDAP abort generated' % (bits[0], a)
self.putx([3, ['WARNING: DAPABORT[31:1] reserved!']])
def handle_reg_unknown(self, cmd, bits):
- bits = bits[:-1]
+ bits = bits[1:]
self.putx([2, ['Unknown instruction: %s' % bits]])
def decode(self, ss, es, data):
val, self.samplenums = val
self.samplenums.reverse()
- # State machine
- if self.state == 'IDLE':
- # Wait until a new instruction is shifted into the IR register.
- if cmd != 'IR TDI':
- return
+ if cmd == 'IR TDI':
# Switch to the state named after the instruction, or 'UNKNOWN'.
# The STM32F10xxx has two serially connected JTAG TAPs, the
# boundary scan tap (5 bits) and the Cortex-M3 TAP (4 bits).
# See UM 31.5 "STM32F10xxx JTAG TAP connection" for details.
- self.state = ir.get(val[:-1][-4:], ['UNKNOWN', 0])[0]
- bstap_ir = ir.get(val[:-1][:4], ['UNKNOWN', 0])[0]
- self.putf(5, 8, [1, ['IR (BS TAP): ' + bstap_ir]])
- self.putf(1, 4, [1, ['IR (M3 TAP): ' + self.state]])
- self.putf(0, 0, [1, ['Reserved (BS TAP)', 'BS', 'B']])
+ self.state = ir.get(val[5:9], ['UNKNOWN', 0])[0]
+ bstap_ir = bs_ir.get(val[:5], ['UNKNOWN', 0])[0]
+ self.putf(4, 8, [1, ['IR (BS TAP): ' + bstap_ir]])
+ self.putf(0, 3, [1, ['IR (M3 TAP): ' + self.state]])
self.putx([2, ['IR: %s' % self.state]])
- elif self.state == 'BYPASS':
+
+ # State machine
+ if self.state == 'BYPASS':
# Here we're interested in incoming bits (TDI).
if cmd != 'DR TDI':
return
# Most (but not all) states need this.
if self.state != 'IDLE':
lad = (lad3 << 3) | (lad2 << 2) | (lad1 << 1) | lad0
- lad_bits = bin(lad)[2:].zfill(4)
+ lad_bits = '{:04b}'.format(lad)
# self.putb([0, ['LAD: %s' % lad_bits]])
# TODO: Only memory read/write is currently supported/tested.
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 fenugrec <fenugrec@users.sourceforge.net>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+'''
+This protocol decoder de-multiplexes Intel MCS-48 (8039, 8048, etc.) external
+program memory accesses. This requires 14 channels: 8 for D0-D7 (data and
+lower 8 bits of address), 4 for A8-A11 (output on port P2), ALE and PSEN.
+An optional A12 is supported, which may be an arbitrary I/O pin driven by
+software (use case is dumping ROM of an HP 3478A).
+'''
+
+from .pd import Decoder
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 fenugrec <fenugrec@users.sourceforge.net>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+import sigrokdecode as srd
+
+class ChannelError(Exception):
+ pass
+
+class Decoder(srd.Decoder):
+ api_version = 3
+ id = 'mcs48'
+ name = 'MCS-48'
+ longname = 'Intel MCS-48'
+ desc = 'Intel MCS-48 external memory access protocol.'
+ license = 'gplv2+'
+ inputs = ['logic']
+ outputs = ['mcs48']
+ channels = (
+ {'id': 'ale', 'name': 'ALE', 'desc': 'Address latch enable'},
+ {'id': 'psen', 'name': '/PSEN', 'desc': 'Program store enable'},
+ ) + tuple({
+ 'id': 'd%d' % i,
+ 'name': 'D%d' % i,
+ 'desc': 'CPU data line %d' % i
+ } for i in range(0, 8)
+ ) + tuple({
+ 'id': 'a%d' % i,
+ 'name': 'A%d' % i,
+ 'desc': 'CPU address line %d' % i
+ } for i in range(8, 12)
+ )
+ optional_channels = tuple({
+ 'id': 'a%d' % i,
+ 'name': 'A%d' % i,
+ 'desc': 'CPU address line %d' % i
+ } for i in range(12, 13)
+ )
+ annotations = (
+ ('romdata', 'Address:Data'),
+ )
+ binary = (
+ ('romdata', 'AAAA:DD'),
+ )
+ OFF_ALE, OFF_PSEN = 0, 1
+ OFF_DATA_BOT, OFF_DATA_TOP = 2, 10
+ OFF_ADDR_BOT, OFF_ADDR_TOP = 10, 14
+ OFF_BANK_BOT, OFF_BANK_TOP = 14, 15
+
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.addr = 0
+ self.addr_s = 0
+ self.data = 0
+ self.data_s = 0
+
+ # Flag to make sure we get an ALE pulse first.
+ self.started = 0
+
+ def start(self):
+ self.out_ann = self.register(srd.OUTPUT_ANN)
+ self.out_bin = self.register(srd.OUTPUT_BINARY)
+
+ def newaddr(self, addr, data):
+ # Falling edge on ALE: reconstruct address.
+ self.started = 1
+ addr = sum([bit << i for i, bit in enumerate(addr)])
+ addr <<= len(data)
+ addr |= sum([bit << i for i, bit in enumerate(data)])
+ self.addr = addr
+ self.addr_s = self.samplenum
+
+ def newdata(self, data):
+ # Edge on PSEN: get data.
+ data = sum([bit << i for i, bit in enumerate(data)])
+ self.data = data
+ self.data_s = self.samplenum
+ if self.started:
+ anntext = '{:04X}:{:02X}'.format(self.addr, self.data)
+ self.put(self.addr_s, self.data_s, self.out_ann, [0, [anntext]])
+ bindata = self.addr.to_bytes(2, byteorder='big')
+ bindata += self.data.to_bytes(1, byteorder='big')
+ self.put(self.addr_s, self.data_s, self.out_bin, [0, bindata])
+
+ def decode(self):
+ # Address bits above A11 are optional, and are considered to be A12+.
+ # This logic needs more adjustment when more bank address pins are
+ # to get supported. For now, having just A12 is considered sufficient.
+ has_bank = self.has_channel(self.OFF_BANK_BOT)
+ bank_pin_count = 1 if has_bank else 0
+ # Sample address on the falling ALE edge.
+ # Save data on falling edge of PSEN.
+ while True:
+ pins = self.wait([{self.OFF_ALE: 'f'}, {self.OFF_PSEN: 'r'}])
+ data = pins[self.OFF_DATA_BOT:self.OFF_DATA_TOP]
+ addr = pins[self.OFF_ADDR_BOT:self.OFF_ADDR_TOP]
+ bank = pins[self.OFF_BANK_BOT:self.OFF_BANK_TOP]
+ if has_bank:
+ addr += bank[:bank_pin_count]
+ # Handle those conditions (one or more) that matched this time.
+ if self.matched[0]:
+ self.newaddr(addr, data)
+ if self.matched[1]:
+ self.newdata(data)
if self.clause45 and self.clause45_addr != -1:
decoded_min += str.format('ADDR: %04X ' % self.clause45_addr)
elif self.clause45:
- decoded_min += str.format('ADDR: UKWN ' % self.clause45_addr)
+ decoded_min += str.format('ADDR: UKWN ')
if self.clause45 and self.opcode > 1 \
or (not self.clause45 and self.opcode):
def state_DATA(self, mdio):
if self.data == -1:
self.data = 0
- self.putff([2, ['TURNAROUND', 'TA', 'T']])
+ self.putff([2, ['TA', 'T']])
if self.ta_invalid:
- self.putff([4, ['TURNAROUND%s' % self.ta_invalid,
- 'TA%s' % self.ta_invalid, 'TA', 'T']])
+ self.putff([4, ['TA%s' % self.ta_invalid, 'TA', 'T']])
self.ss_frame_field = self.samplenum
self.data_bits -= 1
self.data |= mdio << self.data_bits
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2017 Christoph Rackwitz <christoph.rackwitz@rwth-aachen.de>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+'''
+The Miller protocol decoder supports (modified) Miller encoded data.
+
+E.g. used in NFC communication at 106 kbaud.
+'''
+
+from .pd import Decoder
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2017 Christoph Rackwitz <christoph.rackwitz@rwth-aachen.de>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+# http://www.gorferay.com/type-a-communications-interface/
+# https://resources.infosecinstitute.com/introduction-rfid-security/
+# https://www.radio-electronics.com/info/wireless/nfc/near-field-communications-modulation-rf-signal-interface.php
+# https://www.researchgate.net/figure/Modified-Miller-Code_fig16_283498836
+
+# Miller: either edge
+# modified Miller: falling edge
+
+import sigrokdecode as srd
+
+def roundto(x, k=1.0):
+ return round(x / k) * k
+
+class Decoder(srd.Decoder):
+ api_version = 3
+ id = 'miller'
+ name = 'Miller'
+ longname = 'Miller encoding'
+ desc = 'Miller encoding protocol.'
+ license = 'gplv2+'
+ inputs = ['logic']
+ outputs = ['miller']
+ channels = (
+ {'id': 'data', 'name': 'Data', 'desc': 'Data signal'},
+ )
+ options = (
+ {'id': 'baudrate', 'desc': 'Baud rate', 'default': 106000},
+ {'id': 'edge', 'desc': 'Edge', 'default': 'falling', 'values': ('rising', 'falling', 'either')},
+ )
+ annotations = (
+ ('bit', 'Bit'),
+ ('bitstring', 'Bitstring'),
+ )
+ annotation_rows = tuple((u, v, (i,)) for i, (u, v) in enumerate(annotations))
+ binary = (
+ ('raw', 'Raw binary'),
+ )
+
+ def __init__(self):
+ self.samplerate = None
+
+ def metadata(self, key, value):
+ if key == srd.SRD_CONF_SAMPLERATE:
+ self.samplerate = value
+
+ def start(self):
+ self.out_ann = self.register(srd.OUTPUT_ANN)
+ self.out_binary = self.register(srd.OUTPUT_BINARY)
+
+ def decode_bits(self):
+ timeunit = self.samplerate / self.options['baudrate']
+ edgetype = self.options['edge'][0]
+
+ self.wait({0: edgetype}) # first symbol, beginning of unit
+ prevedge = self.samplenum
+
+ # start of message: '0'
+ prevbit = 0
+ yield (0, prevedge, prevedge + timeunit)
+ expectedstart = self.samplenum + timeunit
+
+ # end of message: '0' followed by one idle symbol
+
+ while True:
+ self.wait([{0: edgetype}, {'skip': int(3 * timeunit)}])
+ got_timeout = self.matched[1]
+ sampledelta = (self.samplenum - prevedge)
+ prevedge = self.samplenum
+ timedelta = roundto(sampledelta / timeunit, 0.5)
+
+ # a mark stands for a 1 bit
+ # a mark has an edge in the middle
+
+ # a space stands for a 0 bit
+ # a space either has an edge at the beginning or no edge at all
+ # after a mark, a space is edge-less
+ # after a space, a space has an edge
+
+ # we get 1.0, 1.5, 2.0 times between edges
+
+ # end of transmission is always a space, either edged or edge-less
+
+ if prevbit == 0: # space -> ???
+ if timedelta == 1.0: # 1.0 units -> space
+ yield (0, self.samplenum, self.samplenum + timeunit)
+ prevbit = 0
+ expectedstart = self.samplenum + timeunit
+ elif timedelta == 1.5: # 1.5 units -> mark
+ yield (1, expectedstart, self.samplenum + 0.5*timeunit)
+ prevbit = 1
+ expectedstart = self.samplenum + timeunit*0.5
+ elif timedelta >= 2.0:
+ # idle symbol (end of message)
+ yield None
+ else:
+ # assert timedelta >= 2.0
+ yield (False, self.samplenum - sampledelta, self.samplenum)
+ break
+ else: # mark -> ???
+ if timedelta <= 0.5:
+ yield (False, self.samplenum - sampledelta, self.samplenum)
+ break
+ if timedelta == 1.0: # 1.0 units -> mark again (1.5 from start)
+ yield (1, expectedstart, self.samplenum + 0.5*timeunit)
+ prevbit = 1
+ expectedstart = self.samplenum + 0.5*timeunit
+ elif timedelta == 1.5: # 1.5 units -> space (no pulse) and space (pulse)
+ yield (0, expectedstart, self.samplenum)
+ yield (0, self.samplenum, self.samplenum + timeunit)
+ prevbit = 0
+ expectedstart = self.samplenum + timeunit
+ elif timedelta == 2.0: # 2.0 units -> space (no pulse) and mark (pulse)
+ yield (0, expectedstart, expectedstart + timeunit)
+ yield (1, self.samplenum - 0.5*timeunit, self.samplenum + 0.5*timeunit)
+ prevbit = 1
+ expectedstart = self.samplenum + timeunit*0.5
+ else: # longer -> space and end of message
+ yield (0, expectedstart, expectedstart + timeunit)
+ yield None
+ break
+
+ def decode_run(self):
+ numbits = 0
+ bitvalue = 0
+ bitstring = ''
+ stringstart = None
+ stringend = None
+
+ for bit in self.decode_bits():
+ if bit is None:
+ break
+
+ (value, ss, es) = bit
+
+ if value is False:
+ self.put(int(ss), int(es), self.out_ann, [1, ['ERROR']])
+ else:
+ self.put(int(ss), int(es), self.out_ann, [0, ['{}'.format(value)]])
+
+ if value is False:
+ numbits = 0
+ break
+
+ if stringstart is None:
+ stringstart = ss
+
+ stringend = es
+
+ bitvalue |= value << numbits
+ numbits += 1
+
+ bitstring += '{}'.format(value)
+ if numbits % 4 == 0:
+ bitstring += ' '
+
+ if not numbits:
+ return
+
+ self.put(int(stringstart), int(stringend), self.out_ann, [1, ['{}'.format(bitstring)]])
+
+ numbytes = numbits // 8 + (numbits % 8 > 0)
+ bytestring = bitvalue.to_bytes(numbytes, 'little')
+ self.put(int(stringstart), int(stringend), self.out_binary, [0, bytestring])
+
+ def decode(self):
+ while True:
+ self.decode_run()
curtime = self.samplenum
dt = (curtime - prevtime) / self.samplerate
units = dt / timeunit
- iunits = round(units)
+ iunits = int(max(1, round(units)))
error = abs(units - iunits)
symbol = (pval, iunits)
continue
self.put(prevtime, curtime, self.out_ann, [0, ['{:.3g}'.format(dt)]])
- self.put(prevtime, curtime, self.out_ann, [1, ['{:.1f}*{:.3g}'.format(units, timeunit)]])
if symbol in symbols:
+ self.put(prevtime, curtime, self.out_ann, [1, ['{:.1f}*{:.3g}'.format(units, timeunit)]])
yield (prevtime, curtime, symbol)
+ else:
+ self.put(prevtime, curtime, self.out_ann, [1, ['!! {:.1f}*{:.3g} !!'.format(units, timeunit)]])
prevtime = curtime
thisunit = dt / iunits
- timeunit += (thisunit - timeunit) * 0.02 * iunits # Adapt.
+ timeunit += (thisunit - timeunit) * 0.2 * max(0, 1 - 2*error) # Adapt.
def decode_morse(self):
# Group symbols into letters.
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Steve R <steversig@virginmedia.com>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+'''
+OOK decodes On-off keying based remote control protocols.
+
+It is aimed at 433MHz but should also work with other common RC frequencies.
+The input can be captured directly from a transmitter (before the modulation
+stage) or demodulated by an RF receiver.
+
+Over-the-air captured traces will be a lot noisier and will probably need the
+area of interest to be zoomed into, then selected with the "Cursors" and the
+"Save Selected Range As" feature to be used to extract it from the noise.
+
+There is a limited amount of pre-filtering and garbage removal built into the
+decoder, which can sometimes extract signals directly from a larger over-the-air
+trace. It depends heavily on your environment.
+'''
+
+from .pd import Decoder
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Steve R <steversig@virginmedia.com>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+import sigrokdecode as srd
+
+'''
+OUTPUT_PYTHON format:
+Samples: The Samples array is sent when a DECODE_TIMEOUT occurs.
+[<start>, <finish>, <state>]
+<start> is the sample number of the start of the decoded bit. This may not line
+up with the pulses that were converted into the decoded bit particularly for
+Manchester encoding.
+<finish> is the sample number of the end of the decoded bit.
+<state> is a single-character string which is the state of the decoded bit.
+This can be:
+'0' zero or low
+'1' one or high
+'E' Error or invalid. This can be caused by missing transitions or the wrong
+pulse lengths according to the rules for the particular encoding. In some cases
+this is intentional (Oregon 1 preamble) and is part of the sync pattern. In
+other cases the signal could simply be broken.
+
+If there are more than self.max_errors (default 5) errors in decoding then the
+OUTPUT_PYTHON is not sent, as the data is assumed to be worthless.
+There also needs to be a low for five times the preamble period at the end of
+each set of pulses to trigger a DECODE_TIMEOUT and get the OUTPUT_PYTHON sent.
+'''
+
+class SamplerateError(Exception):
+ pass
+
+class Decoder(srd.Decoder):
+ api_version = 3
+ id = 'ook'
+ name = 'OOK'
+ longname = 'On-off keying'
+ desc = 'On-off keying protocol.'
+ license = 'gplv2+'
+ inputs = ['logic']
+ outputs = ['ook']
+ channels = (
+ {'id': 'data', 'name': 'Data', 'desc': 'Data line'},
+ )
+ annotations = (
+ ('frame', 'Frame'),
+ ('info', 'Info'),
+ ('1111', '1111'),
+ ('1010', '1010'),
+ ('diffman', 'Diff Man'),
+ ('nrz', 'NRZ'),
+ )
+ annotation_rows = (
+ ('frame', 'Framing',(0,)),
+ ('info', 'Info', (1,)),
+ ('man1111', 'Man 1111', (2,)),
+ ('man1010', 'Man 1010', (3,)),
+ ('diffman', 'Diff Man', (4,)),
+ ('nrz', 'NRZ', (5,)),
+ )
+ binary = (
+ ('pulse-lengths', 'Pulse lengths'),
+ )
+ options = (
+ {'id': 'invert', 'desc': 'Invert data', 'default': 'no',
+ 'values': ('no', 'yes')},
+ {'id': 'decodeas', 'desc': 'Decode type', 'default': 'Manchester',
+ 'values': ('NRZ', 'Manchester', 'Diff Manchester')},
+ {'id': 'preamble', 'desc': 'Preamble', 'default': 'auto',
+ 'values': ('auto', '1010', '1111')},
+ {'id': 'preamlen', 'desc': 'Filter length', 'default': '7',
+ 'values': ('0', '3', '4', '5', '6', '7', '8', '9', '10')},
+ {'id': 'diffmanvar', 'desc': 'Transition at start', 'default': '1',
+ 'values': ('1', '0')},
+ )
+
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.samplerate = None
+ self.ss = self.es = -1
+ self.ss_1111 = self.ss_1010 = -1
+ self.samplenumber_last = None
+ self.sample_first = None
+ self.sample_high = 0
+ self.sample_low = 0
+ self.edge_count = 0
+ self.word_first = None
+ self.word_count = 0
+ self.state = 'IDLE'
+ self.lstate = None
+ self.lstate_1010 = None
+ self.insync = 0 # Preamble in sync flag
+ self.man_errors = 0
+ self.man_errors_1010 = 0
+ self.preamble = [] # Preamble buffer
+ self.half_time = -1 # Half time for man 1111
+ self.half_time_1010 = 0 # Half time for man 1010
+ self.pulse_lengths = [] # Pulse lengths
+ self.decoded = [] # Decoded stream
+ self.decoded_1010 = [] # Decoded stream
+ self.diff_man_trans = '0' # Transition
+ self.diff_man_len = 1 # Length of pulse in half clock periods
+ self.max_errors = 5 # Max number of errors to output OOK
+
+ def metadata(self, key, value):
+ if key == srd.SRD_CONF_SAMPLERATE:
+ self.samplerate = value
+
+ def start(self):
+ self.out_ann = self.register(srd.OUTPUT_ANN)
+ self.out_python = self.register(srd.OUTPUT_PYTHON)
+ self.out_binary = self.register(srd.OUTPUT_BINARY)
+ self.invert = self.options['invert']
+ self.decodeas = self.options['decodeas']
+ self.preamble_val = self.options['preamble']
+ self.preamble_len = self.options['preamlen']
+ self.diffmanvar = self.options['diffmanvar']
+
+ def putx(self, data):
+ self.put(self.ss, self.es, self.out_ann, data)
+
+ def putp(self, data):
+ self.put(self.ss, self.es, self.out_python, data)
+
+ def dump_pulse_lengths(self):
+ if self.samplerate:
+ self.pulse_lengths[-1] = self.sample_first # Fix final pulse length.
+ s = 'Pulses(us)='
+ s += ','.join(str(int(int(x) * 1000000 / self.samplerate))
+ for x in self.pulse_lengths)
+ s += '\n'
+ self.put(self.samplenum - 10, self.samplenum, self.out_binary,
+ [0, bytes([ord(c) for c in s])])
+
+ def decode_nrz(self, start, samples, state):
+ self.pulse_lengths.append(samples)
+ # Use different high and low widths to compensate skewed waveforms.
+ dsamples = self.sample_high if state == '1' else self.sample_low
+ self.ss, self.es = start, start + samples
+ while samples > dsamples * 0.5:
+ if samples >= dsamples * 1.5: # More than one bit.
+ self.es = self.ss + dsamples
+ self.putx([5, [state]])
+ self.decoded.append([self.ss, self.es, state])
+ self.edge_count += 1
+ elif samples >= dsamples * 0.5 and samples < dsamples * 1.5: # Last bit.
+ self.putx([5, [state]])
+ self.decoded.append([self.ss, self.es, state])
+ self.edge_count += 1
+ else:
+ self.edge_count += 1
+ samples -= dsamples
+ self.ss += dsamples
+ self.es += dsamples
+
+ # Ensure 2nd row doesn't go past end of 1st row.
+ if self.es > self.samplenum:
+ self.es = self.samplenum
+
+ if self.state == 'DECODE_TIMEOUT': # Five bits - reset.
+ self.ss = self.decoded[0][0]
+ self.es = self.decoded[len(self.decoded) - 1][1]
+ self.dump_pulse_lengths()
+ self.putp(self.decoded)
+ self.decode_timeout()
+ break
+
+ def lock_onto_preamble(self, samples, state): # Filters and recovers clock.
+ self.edge_count += 1
+ l2s = 5 # Max ratio of long to short pulses.
+
+ # Filter incoming pulses to remove random noise.
+ if self.state == 'DECODE_TIMEOUT':
+ self.preamble = []
+ self.edge_count == 0
+ self.word_first = self.samplenum
+ self.sample_first = self.samplenum - self.samplenumber_last
+ self.state = 'WAITING_FOR_PREAMBLE'
+ self.man_errors = 0
+
+ pre_detect = int(self.preamble_len) # Number of valid pulses to detect.
+ pre_samples = self.samplenum - self.samplenumber_last
+ if len(self.preamble) > 0:
+ if (pre_samples * l2s < self.preamble[-1][1] or
+ self.preamble[-1][1] * l2s < pre_samples): # Garbage in.
+ self.put(self.samplenum, self.samplenum,
+ self.out_ann, [0, ['R']]) # Display resets.
+ self.preamble = [] # Clear buffer.
+ self.preamble.append([self.samplenumber_last,
+ pre_samples, state])
+ self.edge_count == 0
+ self.samplenumber_last = self.samplenum
+ self.word_first = self.samplenum
+ else:
+ self.preamble.append([self.samplenumber_last,
+ pre_samples, state])
+ else:
+ self.preamble.append([self.samplenumber_last,
+ pre_samples, state])
+
+ pre = self.preamble
+ if len(self.preamble) == pre_detect: # Have a valid series of pulses.
+ if self.preamble[0][2] == '1':
+ self.sample_high = self.preamble[0][1] # Allows skewed pulses.
+ self.sample_low = self.preamble[1][1]
+ else:
+ self.sample_high = self.preamble[1][1]
+ self.sample_low = self.preamble[0][1]
+
+ self.edge_count = 0
+
+ for i in range(len(self.preamble)):
+ if i > 1:
+ if (pre[i][1] > pre[i - 2][1] * 1.25 or
+ pre[i][1] * 1.25 < pre[i - 2][1]): # Adjust ref width.
+ if pre[i][2] == '1':
+ self.sample_high = pre[i][1]
+ else:
+ self.sample_low = pre[i][1]
+
+ # Display start of preamble.
+ if self.decodeas == 'NRZ':
+ self.decode_nrz(pre[i][0], pre[i][1], pre[i][2])
+ if self.decodeas == 'Manchester':
+ self.decode_manchester(pre[i][0], pre[i][1], pre[i][2])
+ if self.decodeas == 'Diff Manchester':
+ self.es = pre[i][0] + pre[i][1]
+ self.decode_diff_manchester(pre[i][0], pre[i][1], pre[i][2])
+
+ # Used to timeout signal.
+ self.sample_first = int((self.sample_high + self.sample_low)/2)
+ self.insync = 1
+ self.state = 'DECODING'
+ self.lstate = state
+ self.lstate_1010 = state
+
+ def decode_diff_manchester(self, start, samples, state):
+ self.pulse_lengths.append(samples)
+
+ # Use different high and low widths to compensate skewed waveforms.
+ dsamples = self.sample_high if state == '1' else self.sample_low
+
+ self.es = start + samples
+ p_length = round(samples / dsamples) # Find relative pulse length.
+
+ if self.edge_count == 0:
+ self.diff_man_trans = '1' # Very first pulse must be a transition.
+ self.diff_man_len = 1 # Must also be a half pulse.
+ self.ss = start
+ elif self.edge_count % 2 == 1: # Time to make a decision.
+ if self.diffmanvar == '0': # Transition at self.ss is a zero.
+ self.diff_man_trans = '0' if self.diff_man_trans == '1' else '1'
+ if self.diff_man_len == 1 and p_length == 1:
+ self.putx([4, [self.diff_man_trans]])
+ self.decoded.append([self.ss, self.es, self.diff_man_trans])
+ self.diff_man_trans = '1'
+ elif self.diff_man_len == 1 and p_length == 2:
+ self.es -= int(samples / 2)
+ self.putx([4, [self.diff_man_trans]])
+ self.decoded.append([self.ss, self.es, self.diff_man_trans])
+ self.diff_man_trans = '0'
+ self.edge_count += 1 # Add a virt edge to keep in sync with clk.
+ elif self.diff_man_len == 2 and p_length == 1:
+ self.putx([4, [self.diff_man_trans]])
+ self.decoded.append([self.ss, self.es, self.diff_man_trans])
+ self.diff_man_trans = '1'
+ elif self.diff_man_len == 2 and p_length == 2: # Double illegal E E.
+ self.es -= samples
+ self.putx([4, ['E']])
+ self.decoded.append([self.ss, self.es, 'E'])
+ self.ss = self.es
+ self.es += samples
+ self.putx([4, ['E']])
+ self.decoded.append([self.ss, self.es, 'E'])
+ self.diff_man_trans = '1'
+ elif self.diff_man_len == 1 and p_length > 4:
+ if self.state == 'DECODE_TIMEOUT':
+ self.es = self.ss + 2 * self.sample_first
+ self.putx([4, [self.diff_man_trans]]) # Write error.
+ self.decoded.append([self.ss, self.es, self.diff_man_trans])
+ self.ss = self.decoded[0][0]
+ self.es = self.decoded[len(self.decoded) - 1][1]
+ self.dump_pulse_lengths()
+ if self.man_errors < self.max_errors:
+ self.putp(self.decoded)
+ else:
+ error_message = 'Probably not Diff Manchester encoded'
+ self.ss = self.word_first
+ self.putx([1, [error_message]])
+ self.decode_timeout()
+ self.diff_man_trans = '1'
+ self.ss = self.es
+ self.diff_man_len = p_length # Save the previous length.
+ self.edge_count += 1
+
+ def decode_manchester_sim(self, start, samples, state,
+ dsamples, half_time, lstate, ss, pream):
+ ook_bit = []
+ errors = 0
+ if self.edge_count == 0:
+ half_time += 1
+ if samples > 0.75 * dsamples and samples <= 1.5 * dsamples: # Long p.
+ half_time += 2
+ if half_time % 2 == 0: # Transition.
+ es = start
+ else:
+ es = start + int(samples / 2)
+ if ss == start:
+ lstate = 'E'
+ es = start + samples
+ if not (self.edge_count == 0 and pream == '1010'): # Skip first p.
+ ook_bit = [ss, es, lstate]
+ lstate = state
+ ss = es
+ elif samples > 0.25 * dsamples and samples <= 0.75 * dsamples: # Short p.
+ half_time += 1
+ if (half_time % 2 == 0): # Transition.
+ es = start + samples
+ ook_bit = [ss, es, lstate]
+ lstate = state
+ ss = es
+ else: # 1st half.
+ ss = start
+ lstate = state
+ else: # Too long or too short - error.
+ errors = 1
+ if self.state != 'DECODE_TIMEOUT': # Error condition.
+ lstate = 'E'
+ es = ss + samples
+ else: # Assume final half bit buried in timeout pulse.
+ es = ss + self.sample_first
+ ook_bit = [ss, es, lstate]
+ ss = es
+
+ return (half_time, lstate, ss, ook_bit, errors)
+
+ def decode_manchester(self, start, samples, state):
+ self.pulse_lengths.append(samples)
+
+ # Use different high and low widths to compensate skewed waveforms.
+ dsamples = self.sample_high if state == '1' else self.sample_low
+
+ if self.preamble_val != '1010': # 1111 preamble is half clock T.
+ (self.half_time, self.lstate, self.ss_1111, ook_bit, errors) = (
+ self.decode_manchester_sim(start, samples, state, dsamples * 2,
+ self.half_time, self.lstate,
+ self.ss_1111, '1111'))
+ self.man_errors += errors
+ if ook_bit != []:
+ self.decoded.append([ook_bit[0], ook_bit[1], ook_bit[2]])
+
+ if self.preamble_val != '1111': # 1010 preamble is clock T.
+ (self.half_time_1010, self.lstate_1010, self.ss_1010,
+ ook_bit, errors) = (
+ self.decode_manchester_sim(start, samples, state, dsamples,
+ self.half_time_1010, self.lstate_1010,
+ self.ss_1010, '1010'))
+ self.man_errors_1010 += errors
+ if ook_bit != []:
+ self.decoded_1010.append([ook_bit[0], ook_bit[1], ook_bit[2]])
+
+ self.edge_count += 1
+
+ # Stream display and save ook_bit.
+ if ook_bit != []:
+ self.ss, self.es = ook_bit[0], ook_bit[1]
+ if self.preamble_val == '1111':
+ self.putx([2, [ook_bit[2]]])
+ if self.preamble_val == '1010':
+ self.putx([3, [ook_bit[2]]])
+
+ if self.state == 'DECODE_TIMEOUT': # End of packet.
+ self.dump_pulse_lengths()
+
+ decoded = []
+ # If 1010 preamble has less errors use it.
+ if (self.preamble_val == '1010' or
+ (self.man_errors_1010 < self.max_errors and
+ self.man_errors_1010 < self.man_errors and
+ len(self.decoded_1010) > 0)):
+ decoded = self.decoded_1010
+ man_errors = self.man_errors_1010
+ d_row = 3
+ else:
+ decoded = self.decoded
+ man_errors = self.man_errors
+ d_row = 2
+
+ if self.preamble_val == 'auto': # Display OOK packet.
+ for i in range(len(decoded)):
+ self.ss, self.es = decoded[i][0], decoded[i][1]
+ self.putx([d_row, [decoded[i][2]]])
+
+ if (man_errors < self.max_errors and len(decoded) > 0):
+ self.ss, self.es = decoded[0][0], decoded[len(decoded) - 1][1]
+ self.putp(decoded)
+ else:
+ error_message = 'Not Manchester encoded or wrong preamble'
+ self.ss = self.word_first
+ self.putx([1, [error_message]])
+
+ self.put(self.es, self.es, self.out_ann, [0, ['T']]) # Mark timeout.
+ self.decode_timeout()
+
+ def decode_timeout(self):
+ self.word_count = 0
+ self.samplenumber_last = None
+ self.edge_count = 0
+ self.man_errors = 0 # Clear the bit error counters.
+ self.man_errors_1010 = 0
+ self.state = 'IDLE'
+ self.wait({0: 'e'}) # Get rid of long pulse.
+ self.samplenumber_last = self.samplenum
+ self.word_first = self.samplenum
+ self.insync = 0 # Preamble in sync flag
+ self.preamble = [] # Preamble buffer
+ self.half_time = -1 # Half time for man 1111
+ self.half_time_1010 = 0 # Half time for man 1010
+ self.decoded = [] # Decoded bits
+ self.decoded_1010 = [] # Decoded bits for man 1010
+ self.pulse_lengths = []
+
+ def decode(self):
+ while True:
+ if self.edge_count == 0: # Waiting for a signal.
+ pin = self.wait({0: 'e'})
+ self.state = 'DECODING'
+ else:
+ pin = self.wait([{0: 'e'}, {'skip': 5 * self.sample_first}])
+ if self.matched[1] and not self.matched[0]: # No edges for 5 p's.
+ self.state = 'DECODE_TIMEOUT'
+
+ if not self.samplenumber_last: # Set counters to start of signal.
+ self.samplenumber_last = self.samplenum
+ self.word_first = self.samplenum
+ continue
+ samples = self.samplenum - self.samplenumber_last
+ if not self.sample_first: # Get number of samples for first pulse.
+ self.sample_first = samples
+
+ pinstate = pin[0]
+ if self.state == 'DECODE_TIMEOUT': # No edge so flip the state.
+ pinstate = int(not pinstate)
+ if self.invert == 'yes': # Invert signal.
+ pinstate = int(not pinstate)
+ state = '0' if pinstate else '1'
+
+ # No preamble filtering or checking and no skew correction.
+ if self.preamble_len == '0':
+ self.sample_high = self.sample_first
+ self.sample_low = self.sample_first
+ self.insync = 0
+
+ if self.insync == 0:
+ self.lock_onto_preamble(samples, state)
+ else:
+ if self.decodeas == 'NRZ':
+ self.decode_nrz(self.samplenumber_last, samples, state)
+ if self.decodeas == 'Manchester':
+ self.decode_manchester(self.samplenumber_last,
+ samples, state)
+ if self.decodeas == 'Diff Manchester':
+ self.decode_diff_manchester(self.samplenumber_last,
+ samples, state)
+
+ self.samplenumber_last = self.samplenum
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Steve R <steversig@virginmedia.com>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+'''
+This decoder stacks on top of the 'ook' PD and decodes the Oregon Scientific
+433MHz remote control protocol for weather sensors.
+'''
+
+from .pd import Decoder
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Steve R <steversig@virginmedia.com>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+# Most of the info here comes from "434MHz RF Protocol Descriptions for
+# Wireless Weather Sensors - October 2015", Known Sensor ID Codes, p25.
+
+# Format is 4 hex digit ID code followed by a LIST of models that use that
+# ID and the type of sensor.
+# SensorID is used as the key in a Python dict, so it must be upper case.
+# The type of sensor is used to decode and display readings in the L2 decode,
+# it's case-sensitive.
+# Be very careful with the formatting: quotes, brackets, and commas.
+
+sensor = {
+# 'SensorID': [['model1', 'model2'], 'type'],
+ '1984': [['WGR800'], 'Wind'], # The newer anemometer with no temperature/RH sensor.
+ '1994': [['WGR800'], 'Wind'], # The original anemometer which included a temperature/RH sensor.
+ '1A2D': [['THGR228N'], 'Temp_Hum1'],
+ '1A3D': [['THGR918'], ''],
+ '1D20': [['THGN123N', 'THGR122NX', 'THGN123N', 'THGR228N'], 'Temp_Hum'],
+ '1D30': [['THGN500', 'THGN132N'], ''],
+ '2914': [['PCR800'], 'Rain'],
+ '2A19': [['PCR800'], 'Rain1'],
+ '2A1D': [['RGR918'], 'Rain'],
+ '2D10': [['RGR968', 'PGR968 '], 'Rain1'],
+ '3A0D': [['STR918', 'WGR918'], 'Wind'],
+ '5A5D': [['BTHR918'], ''],
+ '5A6D': [['BTHR918N'], 'Temp_Hum_Baro'],
+ '5D53': [['BTHGN129'], 'Baro'],
+ '5D60': [['BTHR968'], 'Temp_Hum_Baro'],
+ 'C844': [['THWR800'], 'Temp'],
+ 'CC13': [['RTGR328N'], 'Temp_Hum'],
+ 'CC23': [['THGR328N'], 'Temp_Hum'],
+ 'CD39': [['RTHR328N'], 'Temp'],
+ 'D874': [['UVN800'], 'UV1'],
+ 'EA4C': [['THWR288A'], 'Temp'],
+ 'EC40': [['THN132N', 'THR238NF'], 'Temp'],
+ 'EC70': [['UVR128'], 'UV'],
+ 'F824': [['THGN800', 'THGN801', 'THGR810'], 'Temp_Hum'],
+ 'F8B4': [['THGR810'], 'Temp_Hum'],
+# '': ['PSR01'], '', ''],
+# '': ['RTGR328NA'], '', ''],
+# '': ['THC268'], '', ''],
+# '': ['THWR288A-JD'], '', ''],
+# '': ['THGR268'], '', ''],
+# '': ['THR268'], '', ''],
+}
+
+# The sensor checksum exceptions are used to calculate the right checksum for
+# sensors that don't follow the v1, v2.1 and v3 methods. For instance a v2.1
+# sensor that has a v3 checksum.
+sensor_checksum = {
+# 'SensorID': ['checksum_method', 'comment'],
+ '1D20': ['v3', 'THGR228N'],
+ '5D60': ['v3', 'BTHR918N'],
+ 'EC40': ['v3', 'THN132N'],
+}
+
+dir_table = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW', 'N']
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Steve R <steversig@virginmedia.com>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+import sigrokdecode as srd
+import math
+from .lists import *
+
+class Decoder(srd.Decoder):
+ api_version = 3
+ id = 'ook_oregon'
+ name = 'Oregon'
+ longname = 'Oregon Scientific'
+ desc = 'Oregon Scientific weather sensor protocol.'
+ license = 'gplv2+'
+ inputs = ['ook']
+ outputs = []
+ annotations = (
+ ('bit', 'Bit'),
+ ('field', 'Field'),
+ ('l2', 'Level 2'),
+ ('pre', 'Preamble'),
+ ('syn', 'Sync'),
+ ('id', 'SensorID'),
+ ('ch', 'Channel'),
+ ('roll', 'Rolling code'),
+ ('f1', 'Flags1'),
+ )
+ annotation_rows = (
+ ('bits', 'Bits', (0,)),
+ ('fields', 'Fields', (1, 3, 4)),
+ ('l2', 'Level 2', (2,)),
+ )
+ binary = (
+ ('data-hex', 'Hex data'),
+ )
+ options = (
+ {'id': 'unknown', 'desc': 'Unknown type is', 'default': 'Unknown',
+ 'values': ('Unknown', 'Temp', 'Temp_Hum', 'Temp_Hum1', 'Temp_Hum_Baro',
+ 'Temp_Hum_Baro1', 'UV', 'UV1', 'Wind', 'Rain', 'Rain1')},
+ )
+
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.decoded = [] # Local cache of decoded OOK.
+ self.skip = None
+
+ def start(self):
+ self.out_ann = self.register(srd.OUTPUT_ANN)
+ self.out_binary = self.register(srd.OUTPUT_BINARY)
+ self.unknown = self.options['unknown']
+
+ def putx(self, data):
+ self.put(self.ss, self.es, self.out_ann, data)
+
+ def dump_oregon_hex(self, start, finish):
+ nib = self.decoded_nibbles
+ hexstring = ''
+ for x in nib:
+ hexstring += str(x[3]) if x[3] != '' else ' '
+ s = 'Oregon ' + self.ver + ' \"' + hexstring.upper() + '\"\n'
+ self.put(start, finish, self.out_binary,
+ [0, bytes([ord(c) for c in s])])
+
+ def oregon_put_pre_and_sync(self, len_pream, len_sync, ver):
+ ook = self.decoded
+ self.decode_pos = len_pream
+ self.ss, self.es = ook[0][0], ook[self.decode_pos][0]
+ self.putx([1, ['Oregon ' + ver + ' Preamble', ver + ' Preamble',
+ ver + ' Pre', ver]])
+ self.decode_pos += len_sync
+ self.ss, self.es = ook[len_pream][0], ook[self.decode_pos][0]
+ self.putx([1, ['Sync', 'Syn', 'S']])
+
+ # Strip off preamble and sync bits.
+ self.decoded = self.decoded[self.decode_pos:]
+ self.ookstring = self.ookstring[self.decode_pos:]
+ self.ver = ver
+
+ def oregon(self):
+ self.ookstring = ''
+ self.decode_pos = 0
+ ook = self.decoded
+ for i in range(len(ook)):
+ self.ookstring += ook[i][2]
+ if '10011001' in self.ookstring[:40]:
+ (preamble, data) = self.ookstring.split('10011001', 1)
+ if len(data) > 0 and len(preamble) > 16:
+ self.oregon_put_pre_and_sync(len(preamble), 8, 'v2.1')
+ self.oregon_v2()
+ elif 'E1100' in self.ookstring[:17]:
+ (preamble, data) = self.ookstring.split('E1100', 1)
+ if len(data) > 0 and len(preamble) <= 12:
+ self.oregon_put_pre_and_sync(len(preamble), 5, 'v1')
+ self.oregon_v1()
+ elif '0101' in self.ookstring[:28]:
+ (preamble, data) = self.ookstring.split('0101', 1)
+ if len(data) > 0 and len(preamble) > 12:
+ self.oregon_put_pre_and_sync(len(preamble), 4, 'v3')
+ self.oregon_v3()
+ elif len(self.ookstring) > 16: # Ignore short packets.
+ error_message = 'Not Oregon or wrong preamble'
+ self.ss, self.es = ook[0][0], ook[len(ook) - 1][1]
+ self.putx([1,[error_message]])
+
+ def oregon_v1(self):
+ ook = self.decoded
+ self.decode_pos = 0
+ self.decoded_nibbles = []
+ if len(self.decoded) >= 32: # Check there are at least 8 nibbles.
+ self.oregon_put_nib('RollingCode', ook[self.decode_pos][0],
+ ook[self.decode_pos + 3][1], 4)
+ self.oregon_put_nib('Ch', ook[self.decode_pos][0],
+ ook[self.decode_pos + 3][1], 4)
+ self.oregon_put_nib('Temp', ook[self.decode_pos][0],
+ ook[self.decode_pos + 15][1], 16)
+ self.oregon_put_nib('Checksum', ook[self.decode_pos][0],
+ ook[self.decode_pos + 7][1], 8)
+
+ self.dump_oregon_hex(ook[0][0], ook[len(ook) - 1][1])
+
+ # L2 decode.
+ self.oregon_temp(2)
+ self.oregon_channel(1)
+ self.oregon_battery(2)
+ self.oregon_checksum_v1()
+
+ def oregon_v2(self): # Convert to v3 format - discard odd bits.
+ self.decode_pos = 0
+ self.ookstring = self.ookstring[1::2]
+ for i in range(len(self.decoded)):
+ if i % 2 == 1:
+ self.decoded[i][0] = self.decoded[i - 1][0] # Re-align start pos.
+ self.decoded = self.decoded[1::2] # Discard left hand bits.
+ self.oregon_v3() # Decode with v3 decoder.
+
+ def oregon_nibbles(self, ookstring):
+ num_nibbles = int(len(ookstring) / 4)
+ nibbles = []
+ for i in range(num_nibbles):
+ nibble = ookstring[4 * i : 4 * i + 4]
+ nibble = nibble[::-1] # Reversed from right.
+ nibbles.append(nibble)
+ return nibbles
+
+ def oregon_put_nib(self, label, start, finish, numbits):
+ param = self.ookstring[self.decode_pos:self.decode_pos + numbits]
+ param = self.oregon_nibbles(param)
+ if 'E' in ''.join(param): # Blank out fields with errors.
+ result = ''
+ else:
+ result = hex(int(''.join(param), 2))[2:]
+ if len(result) < numbits / 4: # Reinstate leading zeros.
+ result = '0' * (int(numbits / 4) - len(result)) + result
+ if label != '':
+ label += ': '
+ self.put(start, finish, self.out_ann, [1, [label + result, result]])
+ if label == '': # No label - use nibble position.
+ label = int(self.decode_pos / 4)
+ for i in range(len(param)):
+ ss = self.decoded[self.decode_pos + (4 * i)][0]
+ es = self.decoded[self.decode_pos + (4 * i) + 3][1]
+ # Blank out nibbles with errors.
+ result = '' if ('E' in param[i]) else hex(int(param[i], 2))[2:]
+ # Save nibbles for L2 decoder.
+ self.decoded_nibbles.append([ss, es, label, result])
+ self.decode_pos += numbits
+
+ def oregon_v3(self):
+ self.decode_pos = 0
+ self.decoded_nibbles = []
+ ook = self.decoded
+
+ if len(self.decoded) >= 32: # Check there are at least 8 nibbles.
+ self.oregon_put_nib('SensorID', ook[self.decode_pos][0],
+ ook[self.decode_pos + 16][0], 16)
+ self.oregon_put_nib('Ch', ook[self.decode_pos][0],
+ ook[self.decode_pos + 3][1], 4)
+ self.oregon_put_nib('RollingCode', ook[self.decode_pos][0],
+ ook[self.decode_pos + 7][1], 8)
+ self.oregon_put_nib('Flags1', ook[self.decode_pos][0],
+ ook[self.decode_pos + 3][1], 4)
+
+ rem_nibbles = len(self.ookstring[self.decode_pos:]) // 4
+ for i in range(rem_nibbles): # Display and save rest of nibbles.
+ self.oregon_put_nib('', ook[self.decode_pos][0],
+ ook[self.decode_pos + 3][1], 4)
+ self.dump_oregon_hex(ook[0][0], ook[len(ook) - 1][1])
+ self.oregon_level2() # Level 2 decode.
+ else:
+ error_message = 'Too short to decode'
+ self.put(ook[0][0], ook[-1][1], self.out_ann, [1, [error_message]])
+
+ def oregon_put_l2_param(self, offset, digits, dec_point, pre_label, label):
+ nib = self.decoded_nibbles
+ result = 0
+ out_string = ''.join(str(x[3]) for x in nib[offset:offset + digits])
+ if len(out_string) == digits:
+ for i in range(dec_point, 0, -1):
+ result += int(nib[offset + dec_point - i][3], 16) / pow(10, i)
+ for i in range(dec_point, digits):
+ result += int(nib[offset + i][3], 16) * pow(10, i - dec_point)
+ result = '%g' % (result)
+ else:
+ result = ''
+ es = nib[offset + digits - 1][1]
+ if label == '\u2103':
+ es = nib[offset + digits][1] # Align temp to include +/- nibble.
+ self.put(nib[offset][0], es, self.out_ann,
+ [2, [pre_label + result + label, result]])
+
+ def oregon_temp(self, offset):
+ nib = self.decoded_nibbles
+ if nib[offset + 3][3] != '':
+ temp_sign = str(int(nib[offset + 3][3], 16))
+ temp_sign = '-' if temp_sign != '0' else '+'
+ else:
+ temp_sign = '?'
+ self.oregon_put_l2_param(offset, 3, 1, temp_sign, '\u2103')
+
+ def oregon_baro(self, offset):
+ nib = self.decoded_nibbles
+ baro = ''
+ if not (nib[offset + 2][3] == '' or nib[offset + 1][3] == ''
+ or nib[offset][3] == ''):
+ baro = str(int(nib[offset + 1][3] + nib[offset][3], 16) + 856)
+ self.put(nib[offset][0], nib[offset + 3][1],
+ self.out_ann, [2, [baro + ' mb', baro]])
+
+ def oregon_wind_dir(self, offset):
+ nib = self.decoded_nibbles
+ if nib[offset][3] != '':
+ w_dir = int(int(nib[offset][3], 16) * 22.5)
+ w_compass = dir_table[math.floor((w_dir + 11.25) / 22.5)]
+ self.put(nib[offset][0], nib[offset][1], self.out_ann,
+ [2, [w_compass + ' (' + str(w_dir) + '\u00b0)', w_compass]])
+
+ def oregon_channel(self, offset):
+ nib = self.decoded_nibbles
+ channel = ''
+ if nib[offset][3] != '':
+ ch = int(nib[offset][3], 16)
+ if self.ver != 'v3': # May not be true for all v2.1 sensors.
+ if ch != 0:
+ bit_pos = 0
+ while ((ch & 1) == 0):
+ bit_pos += 1
+ ch = ch >> 1
+ if self.ver == 'v2.1':
+ bit_pos += 1
+ channel = str(bit_pos)
+ elif self.ver == 'v3': # Not sure if this applies to all v3's.
+ channel = str(ch)
+ if channel != '':
+ self.put(nib[offset][0], nib[offset][1],
+ self.out_ann, [2, ['Ch ' + channel, channel]])
+
+ def oregon_battery(self, offset):
+ nib = self.decoded_nibbles
+ batt = 'OK'
+ if nib[offset][3] != '':
+ if (int(nib[offset][3], 16) >> 2) & 0x1 == 1:
+ batt = 'Low'
+ self.put(nib[offset][0], nib[offset][1],
+ self.out_ann, [2, ['Batt ' + batt, batt]])
+
+ def oregon_level2(self): # v2 and v3 level 2 decoder.
+ nib = self.decoded_nibbles
+ self.sensor_id = (nib[0][3] + nib[1][3] + nib[2][3] + nib[3][3]).upper()
+ nl, sensor_type = sensor.get(self.sensor_id, [['Unknown'], 'Unknown'])
+ names = ','.join(nl)
+ # Allow user to try decoding an unknown sensor.
+ if sensor_type == 'Unknown' and self.unknown != 'Unknown':
+ sensor_type = self.unknown
+ self.put(nib[0][0], nib[3][1], self.out_ann,
+ [2, [names + ' - ' + sensor_type, names, nl[0]]])
+ self.oregon_channel(4)
+ self.oregon_battery(7)
+ if sensor_type == 'Rain':
+ self.oregon_put_l2_param(8, 4, 2, '', ' in/hr') # Rain rate
+ self.oregon_put_l2_param(12, 6, 3, 'Total ', ' in') # Rain total
+ self.oregon_checksum(18)
+ if sensor_type == 'Rain1':
+ self.oregon_put_l2_param(8, 3, 1, '', ' mm/hr') # Rain rate
+ self.oregon_put_l2_param(11, 5, 1, 'Total ', ' mm') # Rain total
+ self.oregon_checksum(18)
+ if sensor_type == 'Temp':
+ self.oregon_temp(8)
+ self.oregon_checksum(12)
+ if sensor_type == 'Temp_Hum_Baro':
+ self.oregon_temp(8)
+ self.oregon_put_l2_param(12, 2, 0, 'Hum ', '%') # Hum
+ self.oregon_baro(15) # Baro
+ self.oregon_checksum(19)
+ if sensor_type == 'Temp_Hum_Baro1':
+ self.oregon_temp(8)
+ self.oregon_put_l2_param(12, 2, 0, 'Hum ', '%') # Hum
+ self.oregon_baro(14) # Baro
+ if sensor_type == 'Temp_Hum':
+ self.oregon_temp(8)
+ self.oregon_put_l2_param(12, 2, 0, 'Hum ', '%') # Hum
+ self.oregon_checksum(15)
+ if sensor_type == 'Temp_Hum1':
+ self.oregon_temp(8)
+ self.oregon_put_l2_param(12, 2, 0, 'Hum ', '%') # Hum
+ self.oregon_checksum(14)
+ if sensor_type == 'UV':
+ self.oregon_put_l2_param(8, 2, 0, '', '') # UV
+ if sensor_type == 'UV1':
+ self.oregon_put_l2_param(11, 2, 0,'' ,'') # UV
+ if sensor_type == 'Wind':
+ self.oregon_wind_dir(8)
+ self.oregon_put_l2_param(11, 3, 1, 'Gust ', ' m/s') # Wind gust
+ self.oregon_put_l2_param(14, 3, 1, 'Speed ', ' m/s') # Wind speed
+ self.oregon_checksum(17)
+
+ def oregon_put_checksum(self, nibbles, checksum):
+ nib = self.decoded_nibbles
+ result = 'BAD'
+ if (nibbles + 1) < len(nib):
+ if (nib[nibbles + 1][3] != '' and nib[nibbles][3] != ''
+ and checksum != -1):
+ if self.ver != 'v1':
+ if checksum == (int(nib[nibbles + 1][3], 16) * 16 +
+ int(nib[nibbles][3], 16)):
+ result = 'OK'
+ else:
+ if checksum == (int(nib[nibbles][3], 16) * 16 +
+ int(nib[nibbles + 1][3], 16)):
+ result = 'OK'
+ rx_check = (nib[nibbles + 1][3] + nib[nibbles][3]).upper()
+ details = '%s Calc %s Rx %s ' % (result, hex(checksum)[2:].upper(),
+ rx_check)
+ self.put(nib[nibbles][0], nib[nibbles + 1][1],
+ self.out_ann, [2, ['Checksum ' + details, result]])
+
+ def oregon_checksum(self, nibbles):
+ checksum = 0
+ for i in range(nibbles): # Add reversed nibbles.
+ nibble = self.ookstring[i * 4 : i * 4 + 4]
+ nibble = nibble[::-1] # Reversed from right.
+ if 'E' in nibble: # Abort checksum if there are errors.
+ checksum = -1
+ break
+ checksum += int(nibble, 2)
+ if checksum > 255:
+ checksum -= 255 # Make it roll over at 255.
+ chk_ver, comment = sensor_checksum.get(self.sensor_id,
+ ['Unknown', 'Unknown'])
+ if chk_ver != 'Unknown':
+ self.ver = chk_ver
+ if self.ver == 'v2.1':
+ checksum -= 10 # Subtract 10 from v2 checksums.
+ self.oregon_put_checksum(nibbles, checksum)
+
+ def oregon_checksum_v1(self):
+ nib = self.decoded_nibbles
+ checksum = 0
+ for i in range(3): # Add the first three bytes.
+ if nib[2 * i][3] == '' or nib[2 * i + 1][3] == '': # Abort if blank.
+ checksum = -1
+ break
+ checksum += ((int(nib[2 * i][3], 16) & 0xF) << 4 |
+ (int(nib[2 * i + 1][3], 16) & 0xF))
+ if checksum > 255:
+ checksum -= 255 # Make it roll over at 255.
+ self.oregon_put_checksum(6, checksum)
+
+ def decode(self, ss, es, data):
+ self.decoded = data
+ self.oregon()
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Steve R <steversig@virginmedia.com>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+'''
+This decoder stacks on top of the 'ook' PD and visualizes protocol details
+in various ways.
+'''
+
+from .pd import Decoder
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Steve R <steversig@virginmedia.com>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+import sigrokdecode as srd
+from common.srdhelper import bcd2int
+
+class Decoder(srd.Decoder):
+ api_version = 3
+ id = 'ook_vis'
+ name = 'OOK visualisation'
+ longname = 'On-off keying visualisation'
+ desc = 'OOK visualisation in various formats.'
+ license = 'gplv2+'
+ inputs = ['ook']
+ outputs = ['ook']
+ annotations = (
+ ('bit', 'Bit'),
+ ('ref', 'Reference'),
+ ('field', 'Field'),
+ ('ref_field', 'Ref field'),
+ ('level2', 'L2'),
+ ('ref_level2', 'Ref L2'),
+ )
+ annotation_rows = (
+ ('bits', 'Bits', (0,)),
+ ('compare', 'Compare', (1,)),
+ ('fields', 'Fields', (2,)),
+ ('ref_fields', 'Ref fields', (3,)),
+ ('level2', 'L2', (4,)),
+ ('ref_level2', 'Ref L2', (5,)),
+ )
+ options = (
+ {'id': 'displayas', 'desc': 'Display as', 'default': 'Nibble - Hex',
+ 'values': ('Byte - Hex', 'Byte - Hex rev', 'Byte - BCD',
+ 'Byte - BCD rev', 'Nibble - Hex', 'Nibble - Hex rev', 'Nibble - BCD',
+ 'Nibble - BCD rev')},
+ {'id': 'synclen', 'desc': 'Sync length', 'default': '4',
+ 'values': ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10')},
+ {'id': 'syncoffset', 'desc': 'Sync offset', 'default': '0',
+ 'values': ('-4', '-3', '-2', '-1', '0', '1', '2', '3', '4')},
+ {'id': 'refsample', 'desc': 'Compare', 'default': 'off', 'values':
+ ('off', 'show numbers', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20',
+ '21', '22', '23', '24', '25', '26', '27', '28', '29', '30')},
+ )
+
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.decoded = [] # Local cache of decoded OOK.
+ self.ookstring = ''
+ self.ookcache = []
+ self.trace_num = 0
+
+ def start(self):
+ self.out_ann = self.register(srd.OUTPUT_ANN)
+ self.out_python = self.register(srd.OUTPUT_PYTHON)
+ self.displayas = self.options['displayas']
+ self.sync_length = self.options['synclen']
+ self.sync_offset = self.options['syncoffset']
+ self.ref = self.options['refsample']
+
+ def putx(self, data):
+ self.put(self.ss, self.es, self.out_ann, data)
+
+ def putp(self, data):
+ self.put(self.ss, self.es, self.out_python, data)
+
+ def display_level2(self, bits, line):
+ self.decode_pos = 0
+ ook = self.decoded
+ # Find the end of the preamble which could be 1010 or 1111.
+ if len(ook) > 1:
+ preamble_end = len(ook) + 1
+ char_first = ook[0][2]
+ char_second = ook[1][2]
+ if char_first == char_second: # 1111
+ preamble = '1111'
+ char_last = char_first
+ else:
+ preamble = '1010'
+ char_last = char_second
+ for i in range(len(ook)):
+ if preamble == '1111':
+ if ook[i][2] != char_last:
+ preamble_end = i
+ break
+ else:
+ char_last = ook[i][2]
+ else:
+ if ook[i][2] != char_last:
+ char_last = ook[i][2]
+ else:
+ preamble_end = i
+ break
+
+ if len(ook) >= preamble_end:
+ preamble_end += int(self.sync_offset) - 1
+ self.ss, self.es = ook[0][0], ook[preamble_end][1]
+ self.putx([line, ['Preamble', 'Pre', 'P']])
+ self.decode_pos += preamble_end
+
+ if len(ook) > self.decode_pos + int(self.sync_length):
+ self.ss = self.es
+ self.es = ook[self.decode_pos + int(self.sync_length)][1]
+ self.putx([line, ['Sync', 'Syn', 'S']])
+ self.decode_pos += int(self.sync_length) + 1
+
+ ookstring = self.ookstring[self.decode_pos:]
+ rem_nibbles = len(ookstring) // bits
+ for i in range(rem_nibbles): # Display the rest of nibbles.
+ self.ss = ook[self.decode_pos][0]
+ self.es = ook[self.decode_pos + bits - 1][1]
+ self.put_field(bits, line)
+
+ def put_field(self, numbits, line):
+ param = self.ookstring[self.decode_pos:self.decode_pos + numbits]
+ if 'rev' in self.displayas:
+ param = param[::-1] # Reversed from right.
+ if not 'E' in param: # Format if no errors.
+ if 'Hex' in self.displayas:
+ param = hex(int(param, 2))[2:]
+ elif 'BCD' in self.displayas:
+ param = bcd2int(int(param, 2))
+ self.putx([line, [str(param)]])
+ self.decode_pos += numbits
+
+ def display_all(self):
+ ookstring = ''
+ self.decode_pos = 0
+ ook = self.decoded
+ for i in range(len(ook)):
+ self.ookstring += ook[i][2]
+ bits = 4 if 'Nibble' in self.displayas else 8
+ rem_nibbles = len(self.ookstring) // bits
+ for i in range(rem_nibbles): # Display the rest of the nibbles.
+ self.ss = ook[self.decode_pos][0]
+ self.es = ook[self.decode_pos + bits - 1][1]
+ self.put_field(bits, 2)
+
+ self.display_level2(bits, 4) # Display L2 decode.
+
+ if (self.ref != 'off' and self.ref != 'show numbers' and
+ len(self.ookcache) >= int(self.ref)): # Compare traces.
+ ref = int(self.ref) - 1
+ self.display_ref(self.trace_num, ref)
+ if len(self.ookcache) == int(self.ref): # Backfill.
+ for i in range(0, ref):
+ self.display_ref(i, ref)
+ elif self.ref == 'show numbers': # Display ref numbers.
+ self.ss = self.ookcache[self.trace_num][0][0]
+ end_sig = len(self.ookcache[self.trace_num]) - 1
+ self.es = self.ookcache[self.trace_num][end_sig][1]
+ self.putx([1, [str(self.trace_num + 1)]])
+
+ def display_ref(self, t_num, ref):
+ display_len = len(self.ookcache[ref])
+ if len(self.ookcache[t_num]) < len(self.ookcache[ref]):
+ display_len = len(self.ookcache[t_num])
+ for i in range(display_len):
+ self.ss = self.ookcache[t_num][i][0]
+ self.es = self.ookcache[t_num][i][1]
+ self.putx([1, [self.ookcache[ref][i][2]]])
+
+ def add_to_cache(self): # Cache the OOK so it can be used as a reference.
+ self.ookcache.append(self.decoded)
+
+ def decode(self, ss, es, data):
+ self.decoded = data
+ self.add_to_cache()
+ self.display_all()
+ self.ookstring = ''
+ self.trace_num += 1
+ self.ss = ss
+ self.es = es
+ self.putp(data) # Send data up the stack.
self.cs_was_deasserted = False
self.have_cs = self.have_miso = self.have_mosi = None
- def metadata(self, key, value):
- if key == srd.SRD_CONF_SAMPLERATE:
- self.samplerate = value
-
def start(self):
self.out_python = self.register(srd.OUTPUT_PYTHON)
self.out_ann = self.register(srd.OUTPUT_ANN)
self.out_binary = self.register(srd.OUTPUT_BINARY)
- if self.samplerate:
- self.out_bitrate = self.register(srd.OUTPUT_META,
- meta=(int, 'Bitrate', 'Bitrate during transfers'))
+ self.out_bitrate = self.register(srd.OUTPUT_META,
+ meta=(int, 'Bitrate', 'Bitrate during transfers'))
self.bw = (self.options['wordsize'] + 7) // 8
+ def metadata(self, key, value):
+ if key == srd.SRD_CONF_SAMPLERATE:
+ self.samplerate = value
+
def putw(self, data):
self.put(self.ss_block, self.samplenum, self.out_ann, data)
(0x60, ('CE', 'Chip erase')),
(0x70, ('ESRY', 'Enable SO to output RY/BY#')),
(0x80, ('DSRY', 'Disable SO to output RY/BY#')),
+ (0x82, ('WRITE1', 'Main memory page program through buffer 1 with built-in erase')),
+ (0x85, ('WRITE2', 'Main memory page program through buffer 2 with built-in erase')),
(0x90, ('REMS', 'Read electronic manufacturer & device ID')),
(0x9f, ('RDID', 'Read identification')),
(0xab, ('RDP/RES', 'Release from deep powerdown / Read electronic ID')),
(0xbb, ('2READ', '2x I/O read')), # a.k.a. "Fast read dual I/O".
(0xc1, ('EXSO', 'Exit secured OTP')),
(0xc7, ('CE2', 'Chip erase')), # Alternative command ID
+ (0xd7, ('STATUS', 'Status register read')),
(0xd8, ('BE', 'Block erase')),
(0xef, ('REMS2', 'Read ID for 2x I/O mode')),
])
device_name = {
+ 'adesto': {
+ 0x00: 'AT45Dxxx family, standard series',
+ },
'fidelix': {
0x15: 'FM25Q32',
},
}
chips = {
+ # Adesto
+ 'adesto_at45db161e': {
+ 'vendor': 'Adesto',
+ 'model': 'AT45DB161E',
+ 'res_id': 0xff, # The chip doesn't emit an ID here.
+ 'rems_id': 0xffff, # Not supported by the chip.
+ 'rems2_id': 0xffff, # Not supported by the chip.
+ 'rdid_id': 0x1f26000100, # RDID and 2 extra "EDI" bytes.
+ 'page_size': 528, # Configurable, could also be 512 bytes.
+ 'sector_size': 128 * 1024,
+ 'block_size': 4 * 1024,
+ },
# FIDELIX
'fidelix_fm25q32': {
'vendor': 'FIDELIX',
# Don't forget to keep this in sync with 'cmds' in lists.py.
class Ann:
WRSR, PP, READ, WRDI, RDSR, WREN, FAST_READ, SE, RDSCUR, WRSCUR, \
- RDSR2, CE, ESRY, DSRY, REMS, RDID, RDP_RES, CP, ENSO, DP, READ2X, \
- EXSO, CE2, BE, REMS2, \
+ RDSR2, CE, ESRY, DSRY, WRITE1, WRITE2, REMS, RDID, RDP_RES, CP, ENSO, DP, \
+ READ2X, EXSO, CE2, STATUS, BE, REMS2, \
BIT, FIELD, WARN = range(L + 3)
def cmd_annotation_classes():
self.data.append(miso)
self.cmdstate += 1
+ def handle_write_common(self, mosi, miso, ann):
+ # Write data bytes: Master asserts CS#, sends WRITE command, sends
+ # 3-byte address, writes >= 1 data bytes, de-asserts CS#.
+ if self.cmdstate == 1:
+ # Byte 1: Master sends command ID.
+ self.emit_cmd_byte()
+ elif self.cmdstate in (2, 3, 4):
+ # Bytes 2/3/4: Master sends write address (24bits, MSB-first).
+ self.emit_addr_bytes(mosi)
+ elif self.cmdstate >= 5:
+ # Bytes 5-x: Master writes data bytes (until CS# de-asserted).
+ self.es_field = self.es # Will be overwritten for each byte.
+ if self.cmdstate == 5:
+ self.ss_field = self.ss
+ self.on_end_transaction = lambda: self.output_data_block('Data', ann)
+ self.data.append(mosi)
+ self.cmdstate += 1
+
+ def handle_write1(self, mosi, miso):
+ self.handle_write_common(mosi, miso, Ann.WRITE1)
+
+ def handle_write2(self, mosi, miso):
+ self.handle_write_common(mosi, miso, Ann.WRITE2)
+
def handle_fast_read(self, mosi, miso):
# Fast read: Master asserts CS#, sends FAST READ command, sends
# 3-byte address + 1 dummy byte, reads >= 1 data bytes, de-asserts CS#.
self.data.append(b2)
self.cmdstate += 1
+ def handle_status(self, mosi, miso):
+ if self.cmdstate == 1:
+ # Byte 1: Master sends command ID.
+ self.emit_cmd_byte()
+ self.on_end_transaction = lambda: self.putc([Ann.STATUS, [cmds[self.state][1]]])
+ else:
+ # Will be overwritten for each byte.
+ self.es_cmd = self.es
+ self.es_field = self.es
+ if self.cmdstate == 2:
+ self.ss_field = self.ss
+ self.putx([Ann.BIT, ['Status register byte %d: 0x%02x' % ((self.cmdstate % 2) + 1, miso)]])
+ self.cmdstate += 1
+
# TODO: Warn/abort if we don't see the necessary amount of bytes.
# TODO: Warn if WREN was not seen before.
def handle_se(self, mosi, miso):
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Aleksander Alekseev <afiskon@gmail.com>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+'''
+This decoder decodes the ST7735 TFT controller protocol.
+
+Details:
+http://www.displayfuture.com/Display/datasheet/controller/ST7735.pdf
+'''
+
+from .pd import Decoder
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Aleksander Alekseev <afiskon@gmail.com>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+import sigrokdecode as srd
+
+MAX_DATA_LEN = 128
+
+# Command ID -> name, short description
+META = {
+ 0x00: {'name': 'NOP ', 'desc': 'No operation'},
+ 0x01: {'name': 'SWRESET', 'desc': 'Software reset'},
+ 0x04: {'name': 'RDDID ', 'desc': 'Read display ID'},
+ 0x09: {'name': 'RDDST ', 'desc': 'Read display status'},
+ 0x10: {'name': 'SLPIN ', 'desc': 'Sleep in & booster off'},
+ 0x11: {'name': 'SLPOUT ', 'desc': 'Sleep out & booster on'},
+ 0x12: {'name': 'PTLON ', 'desc': 'Partial mode on'},
+ 0x13: {'name': 'NORON ', 'desc': 'Partial off (normal)'},
+ 0x20: {'name': 'INVOFF ', 'desc': 'Display inversion off'},
+ 0x21: {'name': 'INVON ', 'desc': 'Display inversion on'},
+ 0x28: {'name': 'DISPOFF', 'desc': 'Display off'},
+ 0x29: {'name': 'DISPON ', 'desc': 'Display on'},
+ 0x2A: {'name': 'CASET ', 'desc': 'Column address set'},
+ 0x2B: {'name': 'RASET ', 'desc': 'Row address set'},
+ 0x2C: {'name': 'RAMWR ', 'desc': 'Memory write'},
+ 0x2E: {'name': 'RAMRD ', 'desc': 'Memory read'},
+ 0x30: {'name': 'PTLAR ', 'desc': 'Partial start/end address set'},
+ 0x36: {'name': 'MADCTL ', 'desc': 'Memory data address control'},
+ 0x3A: {'name': 'COLMOD ', 'desc': 'Interface pixel format'},
+ 0xB1: {'name': 'FRMCTR1', 'desc': 'Frame rate control (in normal mode / full colors)'},
+ 0xB2: {'name': 'FRMCTR2', 'desc': 'Frame rate control (in idle mode / 8-colors)'},
+ 0xB3: {'name': 'FRMCTR3', 'desc': 'Frame rate control (in partial mode / full colors) '},
+ 0xB4: {'name': 'INVCTR ', 'desc': 'Display inversion control'},
+ 0xB6: {'name': 'DISSET5', 'desc': 'Display function set 5'},
+ 0xC0: {'name': 'PWCTR1 ', 'desc': 'Power control 1'},
+ 0xC1: {'name': 'PWCTR2 ', 'desc': 'Power control 2'},
+ 0xC2: {'name': 'PWCTR3 ', 'desc': 'Power control 3'},
+ 0xC3: {'name': 'PWCTR4 ', 'desc': 'Power control 4'},
+ 0xC4: {'name': 'PWCTR5 ', 'desc': 'Power control 5'},
+ 0xC5: {'name': 'VMCTR1 ', 'desc': 'VCOM control 1'},
+ 0xDA: {'name': 'RDID1 ', 'desc': 'Read ID1'},
+ 0xDB: {'name': 'RDID2 ', 'desc': 'Read ID2'},
+ 0xDC: {'name': 'RDID3 ', 'desc': 'Read ID3'},
+ 0xDD: {'name': 'RDID4 ', 'desc': 'Read ID4'},
+ 0xFC: {'name': 'PWCTR6 ', 'desc': 'Power control 6'},
+ 0xE0: {'name': 'GMCTRP1', 'desc': 'Gamma \'+\'polarity correction characteristics setting'},
+ 0xE1: {'name': 'GMCTRN1', 'desc': 'Gamma \'-\'polarity correction characteristics setting'},
+}
+
+class Ann:
+ BITS, CMD, DATA, DESC = range(4)
+
+class Decoder(srd.Decoder):
+ api_version = 3
+ id = 'st7735'
+ name = 'ST7735'
+ longname = 'Sitronix ST7735'
+ desc = 'Sitronix ST7735 TFT controller protocol.'
+ license = 'gplv2+'
+ inputs = ['logic']
+ outputs = ['st7735']
+ channels = (
+ {'id': 'cs', 'name': 'CS#', 'desc': 'Chip-select'},
+ {'id': 'clk', 'name': 'CLK', 'desc': 'Clock'},
+ {'id': 'mosi', 'name': 'MOSI', 'desc': 'Master out, slave in'},
+ {'id': 'dc', 'name': 'DC', 'desc': 'Data or command'}
+ )
+ annotations = (
+ ('bit', 'Bit'),
+ ('command', 'Command'),
+ ('data', 'Data'),
+ ('description', 'Description'),
+ )
+ annotation_rows = (
+ ('bits', 'Bits', (Ann.BITS,)),
+ ('fields', 'Fields', (Ann.CMD, Ann.DATA)),
+ ('description', 'Description', (Ann.DESC,)),
+ )
+
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.accum_byte = 0
+ self.accum_bits_num = 0
+ self.bit_ss = -1
+ self.byte_ss = -1
+ self.current_bit = -1
+
+ def start(self):
+ self.out_ann = self.register(srd.OUTPUT_ANN)
+
+ def put_desc(self, ss, es, cmd, data):
+ if cmd == -1:
+ return
+ if META[cmd]:
+ self.put(ss, es, self.out_ann, [Ann.DESC,
+ ['%s: %s' % (META[cmd]['name'].strip(), META[cmd]['desc'])]])
+ else:
+ # Default description:
+ dots = ''
+ if len(data) == MAX_DATA_LEN:
+ data = data[:-1]
+ dots = '...'
+ data_str = '(none)'
+ if len(data) > 0:
+ data_str = ' '.join(['%02X' % b for b in data])
+ self.put(ss, es, self.out_ann, [Ann.DESC,
+ ['Unknown command: %02X. Data: %s%s' % (cmd, data_str, dots)]])
+
+ def decode(self):
+ current_cmd = -1
+ current_data = []
+ desc_ss = -1
+ desc_es = -1
+ self.reset()
+ while True:
+ # Check data on both CLK edges.
+ (cs, clk, mosi, dc) = self.wait({1: 'e'})
+
+ if cs == 1: # Wait for CS = low, ignore the rest.
+ self.reset()
+ continue
+
+ if clk == 1:
+ # Read one bit.
+ self.bit_ss = self.samplenum
+ if self.accum_bits_num == 0:
+ self.byte_ss = self.samplenum
+ self.current_bit = mosi
+
+ if (clk == 0) and (self.current_bit >= 0):
+ # Process one bit.
+ self.put(self.bit_ss, self.samplenum, self.out_ann,
+ [Ann.BITS, [str(self.current_bit)]])
+ self.accum_byte = (self.accum_byte << 1) | self.current_bit # MSB-first.
+ self.accum_bits_num += 1
+ if self.accum_bits_num == 8:
+ # Process one byte.
+ ann = Ann.DATA if dc else Ann.CMD # DC = low for commands.
+ self.put(self.byte_ss, self.samplenum, self.out_ann,
+ [ann, ['%02X' % self.accum_byte]])
+ if ann == Ann.CMD:
+ self.put_desc(desc_ss, desc_es, current_cmd, current_data)
+ desc_ss = self.byte_ss
+ desc_es = self.samplenum # For cmds without data.
+ current_cmd = self.accum_byte
+ current_data = []
+ else:
+ if len(current_data) < MAX_DATA_LEN:
+ current_data += [self.accum_byte]
+ desc_es = self.samplenum
+
+ self.accum_bits_num = 0
+ self.accum_byte = 0
+ self.byte_ss = -1
+ self.current_bit = -1
+ self.bit_ss = -1
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Mike Jagdis <mjagdis@eris-associates.co.uk>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, write to the Free Software
+## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+##
+
+'''
+SWIM is a single wire interface for STM8 series 8-bit microcontrollers
+that allows non-intrusive read/wite access to be performed on-the-fly
+to the memory and registers of the MCU for debug and flashing purposes.
+
+See the STMicroelectronics document UM0470 for details.
+'''
+
+from .pd import Decoder
--- /dev/null
+##
+## This file is part of the libsigrokdecode project.
+##
+## Copyright (C) 2018 Mike Jagdis <mjagdis@eris-associates.co.uk>
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, write to the Free Software
+## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+##
+
+import math
+import sigrokdecode as srd
+
+class SamplerateError(Exception):
+ pass
+
+class Decoder(srd.Decoder):
+ api_version = 3
+ id = 'swim'
+ name = 'SWIM'
+ longname = 'STM8 SWIM bus'
+ desc = 'STM8 Single Wire Interface Module (SWIM) protocol.'
+ license = 'gplv2+'
+ inputs = ['logic']
+ outputs = []
+ options = (
+ {'id': 'debug', 'desc': 'Debug', 'default': 'no', 'values': ('yes', 'no') },
+ )
+ channels = (
+ {'id': 'swim', 'name': 'SWIM', 'desc': 'SWIM data line'},
+ )
+ annotations = (
+ ('bit', 'Bit'),
+ ('enterseq', 'SWIM enter sequence'),
+ ('start-host', 'Start bit (host)'),
+ ('start-target', 'Start bit (target)'),
+ ('parity', 'Parity bit'),
+ ('ack', 'Acknowledgement'),
+ ('nack', 'Negative acknowledgement'),
+ ('byte-write', 'Byte write'),
+ ('byte-read', 'Byte read'),
+ ('cmd-unknown', 'Unknown SWIM command'),
+ ('cmd', 'SWIM command'),
+ ('bytes', 'Byte count'),
+ ('address', 'Address'),
+ ('data-write', 'Data write'),
+ ('data-read', 'Data read'),
+ ('debug', 'Debug'),
+ )
+ annotation_rows = (
+ ('bits', 'Bits', (0,)),
+ ('framing', 'Framing', (2, 3, 4, 5, 6, 7, 8)),
+ ('protocol', 'Protocol', (1, 9, 10, 11, 12, 13, 14)),
+ ('debug', 'Debug', (15,)),
+ )
+ binary = (
+ ('tx', 'Dump of data written to target'),
+ ('rx', 'Dump of data read from target'),
+ )
+
+ def __init__(self):
+ # SWIM clock for the target is normally HSI/2 where HSI is 8MHz +- 5%
+ # although the divisor can be removed by setting the SWIMCLK bit in
+ # the CLK_SWIMCCR register. There is no standard for the host so we
+ # will be generous and assume it is using an 8MHz +- 10% oscillator.
+ # We do not need to be accurate. We just need to avoid treating enter
+ # sequence pulses as bits. A synchronization frame will cause this
+ # to be adjusted.
+ self.HSI = 8000000
+ self.HSI_min = self.HSI * 0.9
+ self.HSI_max = self.HSI * 1.1
+ self.swim_clock = self.HSI_min / 2
+
+ self.eseq_edge = [[-1, None], [-1, None]]
+ self.eseq_pairnum = 0
+ self.eseq_pairstart = None
+
+ self.reset()
+
+ def reset(self):
+ self.bit_edge = [[-1, None], [-1, None]]
+ self.bit_maxlen = -1
+ self.bitseq_len = 0
+ self.bitseq_end = None
+ self.proto_state = 'CMD'
+
+ def metadata(self, key, value):
+ if key == srd.SRD_CONF_SAMPLERATE:
+ self.samplerate = value
+
+ def adjust_timings(self):
+ # A low-speed bit is 22 SWIM clocks long.
+ # There are options to shorten bits to 10 clocks or use HSI rather
+ # than HSI/2 as the SWIM clock but the longest valid bit should be no
+ # more than this many samples. This does not need to be accurate.
+ # It exists simply to prevent bits extending unnecessarily far into
+ # trailing bus-idle periods. This will be adjusted every time we see
+ # a synchronization frame or start bit in order to show idle periods
+ # as accurately as possible.
+ self.bit_reflen = math.ceil(self.samplerate * 22 / self.swim_clock)
+
+ def start(self):
+ self.out_ann = self.register(srd.OUTPUT_ANN)
+ self.out_binary = self.register(srd.OUTPUT_BINARY)
+
+ if not self.samplerate:
+ raise SamplerateError('Cannot decode without samplerate.')
+
+ # A synchronization frame is a low that lasts for more than 64 but no
+ # more than 128 SWIM clock periods based on the standard SWIM clock.
+ # Note: we also allow for the possibility that the SWIM clock divisor
+ # has been disabled here.
+ self.sync_reflen_min = math.floor(self.samplerate * 64 / self.HSI_max)
+ self.sync_reflen_max = math.ceil(self.samplerate * 128 / (self.HSI_min / 2))
+
+ self.debug = True if self.options['debug'] == 'yes' else False
+
+ # The SWIM entry sequence is 4 pulses at 2kHz followed by 4 at 1kHz.
+ self.eseq_reflen = math.ceil(self.samplerate / 2048)
+
+ self.adjust_timings()
+
+ def protocol(self):
+ if self.proto_state == 'CMD':
+ # Command
+ if self.bitseq_value == 0x00:
+ self.put(self.bitseq_start, self.bitseq_end, self.out_ann, [10, ['system reset', 'SRST', '!']])
+ elif self.bitseq_value == 0x01:
+ self.proto_state = 'N'
+ self.put(self.bitseq_start, self.bitseq_end, self.out_ann, [10, ['read on-the-fly', 'ROTF', 'r']])
+ elif self.bitseq_value == 0x02:
+ self.proto_state = 'N'
+ self.put(self.bitseq_start, self.bitseq_end, self.out_ann, [10, ['write on-the-fly', 'WOTF', 'w']])
+ else:
+ self.put(self.bitseq_start, self.bitseq_end, self.out_ann, [9, ['unknown', 'UNK']])
+ elif self.proto_state == 'N':
+ # Number of bytes
+ self.proto_byte_count = self.bitseq_value
+ self.proto_state = '@E'
+ self.put(self.bitseq_start, self.bitseq_end, self.out_ann, [11, ['byte count 0x%02x' % self.bitseq_value, 'bytes 0x%02x' % self.bitseq_value, '0x%02x' % self.bitseq_value, '%02x' % self.bitseq_value, '%x' % self.bitseq_value]])
+ elif self.proto_state == '@E':
+ # Address byte 1
+ self.proto_addr = self.bitseq_value
+ self.proto_addr_start = self.bitseq_start
+ self.proto_state = '@H'
+ elif self.proto_state == '@H':
+ # Address byte 2
+ self.proto_addr = (self.proto_addr << 8) | self.bitseq_value
+ self.proto_state = '@L'
+ elif self.proto_state == '@L':
+ # Address byte 3
+ self.proto_addr = (self.proto_addr << 8) | self.bitseq_value
+ self.proto_state = 'D'
+ self.put(self.proto_addr_start, self.bitseq_end, self.out_ann, [12, ['address 0x%06x' % self.proto_addr, 'addr 0x%06x' % self.proto_addr, '0x%06x' % self.proto_addr, '%06x' %self.proto_addr, '%x' % self.proto_addr]])
+ else:
+ if self.proto_byte_count > 0:
+ self.proto_byte_count -= 1
+ if self.proto_byte_count == 0:
+ self.proto_state = 'CMD'
+
+ self.put(self.bitseq_start, self.bitseq_end, self.out_ann, [13 + self.bitseq_dir, ['0x%02x' % self.bitseq_value, '%02x' % self.bitseq_value, '%x' % self.bitseq_value]])
+ self.put(self.bitseq_start, self.bitseq_end, self.out_binary, [0 + self.bitseq_dir, bytes([self.bitseq_value])])
+ if self.debug:
+ self.put(self.bitseq_start, self.bitseq_end, self.out_ann, [15, ['%d more' % self.proto_byte_count, '%d' % self.proto_byte_count]])
+
+ def bitseq(self, bitstart, bitend, bit):
+ if self.bitseq_len == 0:
+ # Looking for start of a bit sequence (command or byte).
+ self.bit_reflen = bitend - bitstart
+ self.bitseq_value = 0
+ self.bitseq_dir = bit
+ self.bitseq_len = 1
+ self.put(bitstart, bitend, self.out_ann, [2 + self.bitseq_dir, ['start', 's']])
+ elif (self.proto_state == 'CMD' and self.bitseq_len == 4) or (self.proto_state != 'CMD' and self.bitseq_len == 9):
+ # Parity bit
+ self.bitseq_end = bitstart
+ self.bitseq_len += 1
+
+ self.put(bitstart, bitend, self.out_ann, [4, ['parity', 'par', 'p']])
+
+ # The start bit is not data but was used for parity calculation.
+ self.bitseq_value &= 0xff
+ self.put(self.bitseq_start, self.bitseq_end, self.out_ann, [7 + self.bitseq_dir, ['0x%02x' % self.bitseq_value, '%02x' % self.bitseq_value, '%x' % self.bitseq_value]])
+ elif (self.proto_state == 'CMD' and self.bitseq_len == 5) or (self.proto_state != 'CMD' and self.bitseq_len == 10):
+ # ACK/NACK bit.
+ if bit:
+ self.put(bitstart, bitend, self.out_ann, [5, ['ack', 'a']])
+ else:
+ self.put(bitstart, bitend, self.out_ann, [6, ['nack', 'n']])
+
+ # We only pass data that was ack'd up the stack.
+ if bit:
+ self.protocol()
+
+ self.bitseq_len = 0
+ else:
+ if self.bitseq_len == 1:
+ self.bitseq_start = bitstart
+ self.bitseq_value = (self.bitseq_value << 1) | bit
+ self.bitseq_len += 1
+
+ def bit(self, start, mid, end):
+ if mid - start >= end - mid:
+ self.put(start, end, self.out_ann, [0, ['0']])
+ bit = 0
+ else:
+ self.put(start, end, self.out_ann, [0, ['1']])
+ bit = 1
+
+ self.bitseq(start, end, bit)
+
+ def detect_synchronize_frame(self, start, end):
+ # Strictly speaking, synchronization frames are only recognised when
+ # SWIM is active. A falling edge on reset disables SWIM and an enter
+ # sequence is needed to re-enable it. However we do not want to be
+ # reliant on seeing the NRST pin just for that and we also want to be
+ # able to decode SWIM even if we just sample parts of the dialogue.
+ # For this reason we limit ourselves to only recognizing
+ # synchronization frames that have believable lengths based on our
+ # knowledge of the range of possible SWIM clocks.
+ if self.samplenum - self.eseq_edge[1][1] >= self.sync_reflen_min and self.samplenum - self.eseq_edge[1][1] <= self.sync_reflen_max:
+ self.put(self.eseq_edge[1][1], self.samplenum, self.out_ann, [1, ['synchronization frame', 'synchronization', 'sync', 's']])
+
+ # A low that lasts for more than 64 SWIM clock periods causes a
+ # reset of the SWIM communication state machine and will switch
+ # the SWIM to low-speed mode (SWIM_CSR.HS is cleared).
+ self.reset()
+
+ # The low SHOULD last 128 SWIM clocks. This is used to
+ # resynchronize in order to allow for variation in the frequency
+ # of the internal RC oscillator.
+ self.swim_clock = 128 * (self.samplerate / (self.samplenum - self.eseq_edge[1][1]))
+ self.adjust_timings()
+
+ def eseq_potential_start(self, start, end):
+ self.eseq_pairstart = start
+ self.eseq_reflen = end - start
+ self.eseq_pairnum = 1
+
+ def detect_enter_sequence(self, start, end):
+ # According to the spec the enter sequence is four pulses at 2kHz
+ # followed by four at 1kHz. We do not check the frequency but simply
+ # check the lengths of successive pulses against the first. This means
+ # we have no need to account for the accuracy (or lack of) of the
+ # host's oscillator.
+ if self.eseq_pairnum == 0 or abs(self.eseq_reflen - (end - start)) > 2:
+ self.eseq_potential_start(start, end)
+
+ elif self.eseq_pairnum < 4:
+ # The next three pulses should be the same length as the first.
+ self.eseq_pairnum += 1
+
+ if self.eseq_pairnum == 4:
+ self.eseq_reflen /= 2
+ else:
+ # The final four pulses should each be half the length of the
+ # initial pair. Again, a mismatch causes us to reset and use the
+ # current pulse as a new potential enter sequence start.
+ self.eseq_pairnum += 1
+ if self.eseq_pairnum == 8:
+ # Four matching pulses followed by four more that match each
+ # other but are half the length of the first 4. SWIM is active!
+ self.put(self.eseq_pairstart, end, self.out_ann, [1, ['enter sequence', 'enter seq', 'enter', 'ent', 'e']])
+ self.eseq_pairnum = 0
+
+ def decode(self):
+ while True:
+ if self.bit_maxlen >= 0:
+ (swim,) = self.wait()
+ self.bit_maxlen -= 1
+ else:
+ (swim,) = self.wait({0: 'e'})
+
+ if swim != self.eseq_edge[1][0]:
+ if swim == 1 and self.eseq_edge[1][1] is not None:
+ self.detect_synchronize_frame(self.eseq_edge[1][1], self.samplenum)
+ if self.eseq_edge[0][1] is not None:
+ self.detect_enter_sequence(self.eseq_edge[0][1], self.samplenum)
+ self.eseq_edge.pop(0)
+ self.eseq_edge.append([swim, self.samplenum])
+
+ if (swim != self.bit_edge[1][0] and (swim != 1 or self.bit_edge[1][0] != -1)) or self.bit_maxlen == 0:
+ if self.bit_maxlen == 0 and self.bit_edge[1][0] == 1:
+ swim = -1
+
+ if self.bit_edge[1][0] != 0 and swim == 0:
+ self.bit_maxlen = self.bit_reflen
+
+ if self.bit_edge[0][0] == 0 and self.bit_edge[1][0] == 1 and self.samplenum - self.bit_edge[0][1] <= self.bit_reflen + 2:
+ self.bit(self.bit_edge[0][1], self.bit_edge[1][1], self.samplenum)
+
+ self.bit_edge.pop(0)
+ self.bit_edge.append([swim, self.samplenum])
##
import sigrokdecode as srd
+from common.srdhelper import bitpack
from math import floor, ceil
'''
if self.startsample[rxtx] == -1:
self.startsample[rxtx] = self.samplenum
- # Get the next data bit in LSB-first or MSB-first fashion.
- if self.options['bit_order'] == 'lsb-first':
- self.datavalue[rxtx] >>= 1
- self.datavalue[rxtx] |= \
- (signal << (self.options['num_data_bits'] - 1))
- else:
- self.datavalue[rxtx] <<= 1
- self.datavalue[rxtx] |= (signal << 0)
-
self.putg([rxtx + 12, ['%d' % signal]])
# Store individual data bits and their start/end samplenumbers.
if self.cur_data_bit[rxtx] < self.options['num_data_bits']:
return
+ # Convert accumulated data bits to a data value.
+ bits = [b[0] for b in self.databits[rxtx]]
+ if self.options['bit_order'] == 'msb-first':
+ bits.reverse()
+ self.datavalue[rxtx] = bitpack(bits)
self.putpx(rxtx, ['DATA', rxtx,
(self.datavalue[rxtx], self.databits[rxtx])])
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2015 Google, Inc
+## Copyright (C) 2018 davidanger <davidanger@163.com>
+## Copyright (C) 2018 Peter Hazenberg <sigrok@haas-en-berg.nl>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
12: 'WAIT',
13: 'SOFT RESET',
14: 'reserved',
- 15: 'reserved'
+ 15: 'reserved',
+ 16: 'Not Supported',
+ 17: 'Get_Source_Cap_Extended',
+ 18: 'Get_Status',
+ 19: 'FR_Swap',
+ 20: 'Get_PPS_Status',
+ 21: 'Get_Country_Codes',
}
# Data message type
2: 'REQUEST',
3: 'BIST',
4: 'SINK CAP',
+ 5: 'Battery_Status',
+ 6: 'Alert',
+ 7: 'Get_Country_Info',
15: 'VDM'
}
]
RDO_FLAGS = {
+ (1 << 23): 'unchunked',
(1 << 24): 'no_suspend',
(1 << 25): 'comm_cap',
(1 << 26): 'cap_mismatch',
(1 << 27): 'give_back'
}
-PDO_TYPE = ['', 'BATT:', 'VAR:', '<bad>']
-PDO_FLAGS = {
- (1 << 29): 'dual_role_power',
- (1 << 28): 'suspend',
- (1 << 27): 'ext',
- (1 << 26): 'comm_cap',
- (1 << 25): 'dual_role_data'
-}
BIST_MODES = {
0: 'Receiver',
}
VDM_ACK = ['REQ', 'ACK', 'NAK', 'BSY']
+
class SamplerateError(Exception):
pass
inputs = ['logic']
outputs = ['usb_pd']
channels = (
- {'id': 'cc', 'name': 'CC', 'desc': 'Control channel'},
+ {'id': 'cc1', 'name': 'CC1', 'desc': 'Configuration Channel 1'},
+ )
+ optional_channels = (
+ {'id': 'cc2', 'name': 'CC2', 'desc': 'Configuration Channel 2'},
)
options = (
- {'id': 'fulltext', 'desc': 'full text decoding of the packet',
+ {'id': 'fulltext', 'desc': 'Full text decoding of packets',
'default': 'no', 'values': ('yes', 'no')},
)
annotations = (
('type', 'Packet Type'),
- ('Preamble', 'Preamble'),
- ('SOP', 'Start of Packet'),
- ('Head', 'Header'),
- ('Data', 'Data'),
- ('CRC', 'Checksum'),
- ('EOP', 'End Of Packet'),
- ('Sym', '4b5b symbols'),
+ ('preamble', 'Preamble'),
+ ('sop', 'Start of Packet'),
+ ('header', 'Header'),
+ ('data', 'Data'),
+ ('crc', 'Checksum'),
+ ('eop', 'End Of Packet'),
+ ('sym', '4b5b symbols'),
('warnings', 'Warnings'),
('src', 'Source Message'),
('snk', 'Sink Message'),
('text', 'Plain text'),
)
annotation_rows = (
- ('4B5B', 'symbols', (7, )),
- ('Phase', 'parts', (1, 2, 3, 4, 5, 6, )),
- ('payload', 'Payload', (11, )),
- ('type', 'Type', (0, 9, 10, )),
- ('warnings', 'Warnings', (8, )),
- ('text', 'Full text', (12, )),
+ ('4b5b', 'Symbols', (7,)),
+ ('phase', 'Parts', (1, 2, 3, 4, 5, 6)),
+ ('payload', 'Payload', (11,)),
+ ('type', 'Type', (0, 9, 10)),
+ ('warnings', 'Warnings', (8,)),
+ ('text', 'Full text', (12,)),
)
binary = (
('raw-data', 'RAW binary data'),
)
+ stored_pdos = {}
+
def get_request(self, rdo):
pos = (rdo >> 28) & 7
- op_ma = ((rdo >> 10) & 0x3ff) * 10
- max_ma = (rdo & 0x3ff) * 10
- flags = ''
+
+ op_ma = ((rdo >> 10) & 0x3ff) * 0.01
+ max_ma = (rdo & 0x3ff) * 0.01
+
+ mark = self.cap_mark[pos]
+ if mark == 3:
+ op_v = ((rdo >> 9) & 0x7ff) * 0.02
+ op_a = (rdo & 0x3f) * 0.05
+ t_settings = '%gV %gA' % (op_v, op_a)
+ elif mark == 2:
+ op_w = ((rdo >> 10) & 0x3ff) * 0.25
+ mp_w = (rdo & 0x3ff) * 0.25
+ t_settings = '%gW (operating)' % op_w
+ else:
+ op_a = ((rdo >> 10) & 0x3ff) * 0.01
+ max_a = (rdo & 0x3ff) * 0.01
+ t_settings = '%gA (operating) / %gA (max)' % (op_a, max_a)
+
+ t_flags = ''
for f in sorted(RDO_FLAGS.keys(), reverse = True):
if rdo & f:
- flags += ' ' + RDO_FLAGS[f]
- return '[%d]%d/%d mA%s' % (pos, op_ma, max_ma, flags)
-
- def get_source_cap(self, pdo):
- t = (pdo >> 30) & 3
- if t == 0:
- mv = ((pdo >> 10) & 0x3ff) * 50
- ma = ((pdo >> 0) & 0x3ff) * 10
- p = '%.1fV %.1fA' % (mv/1000.0, ma/1000.0)
- elif t == 1:
- minv = ((pdo >> 10) & 0x3ff) * 50
- maxv = ((pdo >> 20) & 0x3ff) * 50
- mw = ((pdo >> 0) & 0x3ff) * 250
- p = '%.1f/%.1fV %.1fW' % (minv/1000.0, maxv/1000.0, mw/1000.0)
- elif t == 2:
- minv = ((pdo >> 10) & 0x3ff) * 50
- maxv = ((pdo >> 20) & 0x3ff) * 50
- ma = ((pdo >> 0) & 0x3ff) * 10
- p = '%.1f/%.1fV %.1fA' % (minv/1000.0, maxv/1000.0, ma/1000.0)
- else:
- p = ''
- flags = ''
- for f in sorted(PDO_FLAGS.keys(), reverse = True):
- if pdo & f:
- flags += ' ' + PDO_FLAGS[f]
- return '%s%s%s' % (PDO_TYPE[t], p, flags)
-
- def get_sink_cap(self, pdo):
- t = (pdo >> 30) & 3
- if t == 0:
- mv = ((pdo >> 10) & 0x3ff) * 50
- ma = ((pdo >> 0) & 0x3ff) * 10
- p = '%.1fV %.1fA' % (mv/1000.0, ma/1000.0)
- elif t == 1:
- minv = ((pdo >> 10) & 0x3ff) * 50
- maxv = ((pdo >> 20) & 0x3ff) * 50
- mw = ((pdo >> 0) & 0x3ff) * 250
- p = '%.1f/%.1fV %.1fW' % (minv/1000.0, maxv/1000.0, mw/1000.0)
- elif t == 2:
- minv = ((pdo >> 10) & 0x3ff) * 50
- maxv = ((pdo >> 20) & 0x3ff) * 50
- ma = ((pdo >> 0) & 0x3ff) * 10
- p = '%.1f/%.1fV %.1fA' % (minv/1000.0, maxv/1000.0, ma/1000.0)
+ t_flags += ' [' + RDO_FLAGS[f] + ']'
+
+ if pos in self.stored_pdos.keys():
+ t_pdo = '#%d: %s' % (pos, self.stored_pdos[pos])
else:
- p = ''
- flags = ''
- for f in sorted(PDO_FLAGS.keys(), reverse = True):
+ t_pdo = '#%d' % (pos)
+
+ return '(PDO %s) %s%s' % (t_pdo, t_settings, t_flags)
+
+ def get_source_sink_cap(self, pdo, idx, source):
+ t1 = (pdo >> 30) & 3
+ self.cap_mark[idx] = t1
+
+ flags = {}
+ if t1 == 0:
+ t_name = 'Fixed'
+ if source:
+ flags = {
+ (1 << 29): 'dual_role_power',
+ (1 << 28): 'suspend',
+ (1 << 27): 'unconstrained',
+ (1 << 26): 'comm_cap',
+ (1 << 25): 'dual_role_data',
+ (1 << 24): 'unchunked',
+ }
+ else: # Sink
+ flags = {
+ (1 << 29): 'dual_role_power',
+ (1 << 28): 'high_capability',
+ (1 << 27): 'unconstrained',
+ (1 << 26): 'comm_cap',
+ (1 << 25): 'dual_role_data',
+ (0b01 << 23): 'fr_swap default power',
+ (0b10 << 23): 'fr_swap 1.5 A',
+ (0b11 << 23): 'fr_swap 3.0 A',
+ }
+ mv = ((pdo >> 10) & 0x3ff) * 0.05
+ ma = ((pdo >> 0) & 0x3ff) * 0.01
+ p = '%gV %gA (%gW)' % (mv, ma, mv*ma)
+ self.stored_pdos[idx] = '%s %gV' % (t_name, mv)
+ elif t1 == 1:
+ t_name = 'Battery'
+ flags = {} # No flags defined for Battery PDO in PD 3.0 spec
+ minv = ((pdo >> 10) & 0x3ff) * 0.05
+ maxv = ((pdo >> 20) & 0x3ff) * 0.05
+ mw = ((pdo >> 0) & 0x3ff) * 0.25
+ p = '%g/%gV %gW' % (minv, maxv, mw)
+ self.stored_pdos[idx] = '%s %g/%gV' % (t_name, minv, maxv)
+ elif t1 == 2:
+ t_name = 'Variable'
+ flags = {} # No flags defined for Variable PDO in PD 3.0 spec
+ minv = ((pdo >> 10) & 0x3ff) * 0.05
+ maxv = ((pdo >> 20) & 0x3ff) * 0.05
+ ma = ((pdo >> 0) & 0x3ff) * 0.01
+ p = '%g/%gV %gA' % (minv, maxv, ma)
+ self.stored_pdos[idx] = '%s %g/%gV' % (t_name, minv, maxv)
+ elif t1 == 3:
+ t2 = (pdo >> 28) & 3
+ if t2 == 0:
+ t_name = 'Programmable|PPS'
+ flags = {
+ (1 << 29): 'power_limited',
+ }
+ minv = ((pdo >> 8) & 0xff) * 0.1
+ maxv = ((pdo >> 17) & 0xff) * 0.1
+ ma = ((pdo >> 0) & 0xff) * 0.05
+ p = '%g/%gV %gA' % (minv, maxv, ma)
+ if (pdo >> 27) & 0x1:
+ p += ' [limited]'
+ self.stored_pdos[idx] = '%s %g/%gV' % (t_name, minv, maxv)
+ else:
+ t_name = 'Reserved APDO: '+bin(t2)
+ p = '[raw: %s]' % (bin(pdo))
+ self.stored_pdos[idx] = '%s %s' % (t_name, p)
+ t_flags = ''
+ for f in sorted(flags.keys(), reverse = True):
if pdo & f:
- flags += ' ' + PDO_FLAGS[f]
- return '%s%s%s' % (PDO_TYPE[t], p, flags)
+ t_flags += ' [' + flags[f] + ']'
+ return '[%s] %s%s' % (t_name, p, t_flags)
def get_vdm(self, idx, data):
- if idx == 0: # VDM header
+ if idx == 0: # VDM header
vid = data >> 16
struct = data & (1 << 15)
txt = 'VDM'
- if struct: # Structured VDM
+ if struct: # Structured VDM
cmd = data & 0x1f
src = data & (1 << 5)
ack = (data >> 6) & 3
txt = VDM_ACK[ack] + ' '
txt += VDM_CMDS[cmd] if cmd in VDM_CMDS else 'cmd?'
txt += ' pos %d' % (pos) if pos else ' '
- else: # Unstructured VDM
+ else: # Unstructured VDM
txt = 'unstruct [%04x]' % (data & 0x7fff)
txt += ' SVID:%04x' % (vid)
- else: # VDM payload
+ else: # VDM payload
txt = 'VDO:%08x' % (data)
return txt
mode_name = BIST_MODES[mode] if mode in BIST_MODES else 'INVALID'
if mode == 2:
mode_name = 'Counter[= %d]' % (counter)
- # TODO check all 0 bits are 0 / emit warnings
+ # TODO: Check all 0 bits are 0 / emit warnings.
return 'mode %s' % (mode_name) if idx == 0 else 'invalid BRO'
def putpayload(self, s0, s1, idx):
t = self.head_type()
- txt = '???'
+ txt = '['+str(idx+1)+'] '
if t == 2:
- txt = self.get_request(self.data[idx])
- elif t == 1:
- txt = self.get_source_cap(self.data[idx])
- elif t == 4:
- txt = self.get_sink_cap(self.data[idx])
+ txt += self.get_request(self.data[idx])
+ elif t == 1 or t == 4:
+ txt += self.get_source_sink_cap(self.data[idx], idx+1, t==1)
elif t == 15:
- txt = self.get_vdm(idx, self.data[idx])
+ txt += self.get_vdm(idx, self.data[idx])
elif t == 3:
- txt = self.get_bist(idx, self.data[idx])
+ txt += self.get_bist(idx, self.data[idx])
self.putx(s0, s1, [11, [txt, txt]])
self.text += ' - ' + txt
else:
shortm = DATA_TYPES[t] if t in DATA_TYPES else 'DAT???'
- longm = '{:s}[{:d}]:{:s}'.format(role, self.head_id(), shortm)
+ longm = '(r{:d}) {:s}[{:d}]: {:s}'.format(self.head_rev(), role, self.head_id(), shortm)
self.putx(0, -1, [ann_type, [longm, shortm]])
self.text += longm
def get_short(self):
i = self.idx
- # Check it's not a truncated packet
+ # Check it's not a truncated packet.
if len(self.bits) - i <= 20:
self.putwarn('Truncated', '!')
return 0x0BAD
k = [self.get_sym(i), self.get_sym(i+5),
self.get_sym(i+10), self.get_sym(i+15)]
- # TODO check bad symbols
+ # TODO: Check bad symbols.
val = k[0] | (k[1] << 4) | (k[2] << 8) | (k[3] << 12)
self.idx += 20
return val
sym = START_OF_PACKETS.get(k, None)
if not sym:
sym = self.find_corrupted_sop(k)
- # We have an interesting symbol sequence
+ # We have an interesting symbol sequence.
if sym:
- # annotate the preamble
+ # Annotate the preamble.
self.putx(0, i, [1, ['Preamble', '...']])
- # annotate each symbol
+ # Annotate each symbol.
self.rec_sym(i, k[0])
self.rec_sym(i+5, k[1])
self.rec_sym(i+10, k[2])
self.rec_sym(i+15, k[3])
if sym == 'Hard Reset':
self.text += 'HRST'
- return -1 # Hard reset
+ return -1 # Hard reset
elif sym == 'Cable Reset':
self.text += 'CRST'
- return -1 # Cable reset
+ return -1 # Cable reset
else:
self.putx(i, i+20, [2, [sym, 'S']])
return i+20
self.putx(0, len(self.bits), [1, ['Junk???', 'XXX']])
self.text += 'Junk???'
self.putwarn('No start of packet found', 'XXX')
- return -1 # No Start Of Packet
+ return -1 # No Start Of Packet
def __init__(self):
self.reset()
self.bad = []
self.half_one = False
self.start_one = 0
+ self.stored_pdos = {}
+ self.cap_mark = [0, 0, 0, 0, 0, 0, 0, 0]
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
self.samplerate = value
- # 0 is 2 UI, space larger than 1.5x 0 is definitely wrong
+ # 0 is 2 UI, space larger than 1.5x 0 is definitely wrong.
self.maxbit = self.us2samples(3 * UI_US)
- # duration threshold between half 1 and 0
+ # Duration threshold between half 1 and 0.
self.threshold = self.us2samples(THRESHOLD_US)
def start(self):
- self.out_python = self.register(srd.OUTPUT_PYTHON)
self.out_ann = self.register(srd.OUTPUT_ANN)
self.out_binary = self.register(srd.OUTPUT_BINARY)
self.out_bitrate = self.register(
self.text = ''
if len(self.edges) < 50:
- return # Not a real PD packet
+ return # Not a real PD packet
self.packet_seq += 1
tstamp = float(self.startsample) / self.samplerate
self.idx = self.scan_eop()
if self.idx < 0:
- # Full text trace of the issue
+ # Full text trace of the issue.
self.putx(0, self.idx, [12, [self.text, '...']])
- return # No real packet: ABORT
+ return # No real packet: ABORT.
# Packet header
self.head = self.get_short()
if not self.samplerate:
raise SamplerateError('Cannot decode without samplerate.')
while True:
- self.wait({0: 'e'})
+ pins = self.wait([{0: 'e'}, {1: 'e'}, {'skip': int(self.samplerate/1e3)}])
- # First sample of the packet, just record the start date
+ # First sample of the packet, just record the start date.
if not self.startsample:
self.startsample = self.samplenum
self.previous = self.samplenum
diff = self.samplenum - self.previous
- # Large idle: use it as the end of packet
+ # Large idle: use it as the end of packet.
if diff > self.maxbit:
- # the last edge of the packet
+ # The last edge of the packet.
self.edges.append(self.previous)
- # Export the packet
+ # Export the packet.
self.decode_packet()
- # Reset for next packet
+ # Reset for next packet.
self.startsample = self.samplenum
self.bits = []
self.edges = []
self.bad = []
self.half_one = False
self.start_one = 0
- else: # add the bit to the packet
+ else: # Add the bit to the packet.
is_zero = diff > self.threshold
if is_zero and not self.half_one:
self.bits.append(0)
elif not is_zero and not self.half_one:
self.half_one = True
self.start_one = self.previous
- else: # Invalid BMC sequence
+ else: # Invalid BMC sequence
self.bad.append((self.start_one, self.previous))
- # TODO try to recover
+ # TODO: Try to recover.
self.bits.append(0)
self.edges.append(self.previous)
self.half_one = False
PyErr_Clear();
srd_dbg("Failed to stringify object.");
}
+
return str;
}
PyErr_Clear();
srd_dbg("Failed to get object attribute %s.", attr);
}
+
return str;
}
/** @private */
SRD_PRIV void srd_exception_catch(const char *format, ...)
{
+ int i, ret;
va_list args;
PyObject *py_etype, *py_evalue, *py_etraceback;
PyObject *py_mod, *py_func, *py_tracefmt;
- char *msg, *etype_name, *evalue_str, *tracefmt_str;
+ char *msg, *etype_name, *evalue_str, *outstr;
const char *etype_name_fallback;
PyGILState_STATE gstate;
+ GString *s;
py_etype = py_evalue = py_etraceback = py_mod = py_func = NULL;
/* Call into Python to format the stack trace. */
py_tracefmt = PyObject_CallFunctionObjArgs(py_func,
py_etype, py_evalue, py_etraceback, NULL);
- if (!py_tracefmt)
+ if (!py_tracefmt || !PyList_Check(py_tracefmt))
goto cleanup;
- tracefmt_str = py_stringify(py_tracefmt);
- Py_DECREF(py_tracefmt);
-
- /* Log the detailed stack trace. */
- if (tracefmt_str) {
- srd_dbg("%s", tracefmt_str);
- g_free(tracefmt_str);
+ s = g_string_sized_new(128);
+ for (i = 0; i < PyList_Size(py_tracefmt); i++) {
+ ret = py_listitem_as_str(py_tracefmt, i, &outstr);
+ if (ret == 0) {
+ s = g_string_append(s, outstr);
+ g_free(outstr);
+ }
}
+ srd_err("%s", s->str);
+ g_string_free(s, TRUE);
+
+ Py_DECREF(py_tracefmt);
cleanup:
Py_XDECREF(py_func);
return ret;
}
-/* Helper GComparefunc for g_slist_find_custom() in srd_inst_channel_set_all() */
+/* Helper GComparefunc for g_slist_find_custom() in srd_inst_channel_set_all(). */
static gint compare_channel_id(const struct srd_channel *pdch,
const char *channel_id)
{
return SRD_ERR_ARG;
}
- new_channelmap = g_malloc(sizeof(int) * di->dec_num_channels);
+ new_channelmap = g_malloc0(sizeof(int) * di->dec_num_channels);
/*
* For now, map all indexes to channel -1 (can be overridden later).
srd_dbg("Final channel map:");
num_required_channels = g_slist_length(di->decoder->channels);
for (i = 0; i < di->dec_num_channels; i++) {
- GSList *l = g_slist_nth(di->decoder->channels, i);
- if (!l)
- l = g_slist_nth(di->decoder->opt_channels,
+ GSList *ll = g_slist_nth(di->decoder->channels, i);
+ if (!ll)
+ ll = g_slist_nth(di->decoder->opt_channels,
i - num_required_channels);
- pdch = l->data;
+ pdch = ll->data;
srd_dbg(" - PD ch idx %d (%s) = input data ch idx %d (%s)", i,
pdch->id, new_channelmap[i],
(i < num_required_channels) ? "required" : "optional");
pdch = g_slist_nth(di->decoder->channels, i)->data;
srd_err("Required channel '%s' (index %d) was not specified.",
pdch->id, i);
+ g_free(new_channelmap);
return SRD_ERR;
}
* Create a new protocol decoder instance.
*
* @param sess The session holding the protocol decoder instance.
+ * Must not be NULL.
* @param decoder_id Decoder 'id' field.
* @param options GHashtable of options which override the defaults set in
* the decoder class. May be NULL.
i = 1;
srd_dbg("Creating new %s instance.", decoder_id);
- if (session_is_valid(sess) != SRD_OK) {
- srd_err("Invalid session.");
+ if (!sess)
return NULL;
- }
if (!(dec = srd_decoder_get_by_id(decoder_id))) {
srd_err("Protocol decoder %s not found.", decoder_id);
di->got_new_samples = FALSE;
di->handled_all_samples = FALSE;
di->want_wait_terminate = FALSE;
+ di->decoder_state = SRD_OK;
/*
* Strictly speaking initialization of statically allocated
srd_dbg("%s: Resetting decoder state.", di->inst_id);
- /*
- * Reset internal state of the decoder.
- */
+ /* Reset internal state of the decoder. */
condition_list_free(di);
match_array_free(di);
di->abs_start_samplenum = 0;
di->got_new_samples = FALSE;
di->handled_all_samples = FALSE;
di->want_wait_terminate = FALSE;
+ di->decoder_state = SRD_OK;
/* Conditions and mutex got reset after joining the thread. */
}
* Stack a decoder instance on top of another.
*
* @param sess The session holding the protocol decoder instances.
+ * Must not be NULL.
* @param di_bottom The instance on top of which di_top will be stacked.
* @param di_top The instance to go on top.
*
struct srd_decoder_inst *di_bottom,
struct srd_decoder_inst *di_top)
{
- if (session_is_valid(sess) != SRD_OK) {
- srd_err("Invalid session.");
+ if (!sess)
return SRD_ERR_ARG;
- }
if (!di_bottom || !di_top) {
srd_err("Invalid from/to instance pair.");
* given session.
*
* @param sess The session holding the protocol decoder instance.
+ * Must not be NULL.
* @param inst_id The instance ID to be found.
*
* @return Pointer to struct srd_decoder_inst, or NULL if not found.
GSList *l;
struct srd_decoder_inst *tmp, *di;
- if (session_is_valid(sess) != SRD_OK) {
- srd_err("Invalid session.");
+ if (!sess)
return NULL;
- }
di = NULL;
for (l = sess->di_list; l; l = l->next) {
return di;
}
-static struct srd_decoder_inst *srd_sess_inst_find_by_obj(
- struct srd_session *sess, const GSList *stack,
- const PyObject *obj)
-{
- const GSList *l;
- struct srd_decoder_inst *tmp, *di;
-
- if (session_is_valid(sess) != SRD_OK) {
- srd_err("Invalid session.");
- return NULL;
- }
-
- di = NULL;
- for (l = stack ? stack : sess->di_list; di == NULL && l != NULL; l = l->next) {
- tmp = l->data;
- if (tmp->py_inst == obj)
- di = tmp;
- else if (tmp->next_di)
- di = srd_sess_inst_find_by_obj(sess, tmp->next_di, obj);
- }
-
- return di;
-}
-
-/**
- * Find a decoder instance by its Python object.
- *
- * I.e. find that instance's instantiation of the sigrokdecode.Decoder class.
- * This will recurse to find the instance anywhere in the stack tree of all
- * sessions.
- *
- * @param stack Pointer to a GSList of struct srd_decoder_inst, indicating the
- * stack to search. To start searching at the bottom level of
- * decoder instances, pass NULL.
- * @param obj The Python class instantiation.
- *
- * @return Pointer to struct srd_decoder_inst, or NULL if not found.
- *
- * @private
- *
- * @since 0.1.0
- */
-SRD_PRIV struct srd_decoder_inst *srd_inst_find_by_obj(const GSList *stack,
- const PyObject *obj)
-{
- struct srd_decoder_inst *di;
- struct srd_session *sess;
- GSList *l;
-
- di = NULL;
- for (l = sessions; di == NULL && l != NULL; l = l->next) {
- sess = l->data;
- di = srd_sess_inst_find_by_obj(sess, stack, obj);
- }
-
- return di;
-}
-
/**
* Set the list of initial (assumed) pin values.
*
*
* @private
*/
-static gboolean sample_matches(uint8_t old_sample, uint8_t sample, struct srd_term *term)
+__attribute__((always_inline))
+static inline gboolean sample_matches(uint8_t old_sample, uint8_t sample, struct srd_term *term)
{
/* Caller ensures term != NULL. */
py_res = PyObject_CallMethod(di->py_inst, "decode", NULL);
srd_dbg("%s: decode() method terminated.", di->inst_id);
+ if (!py_res)
+ di->decoder_state = SRD_ERR;
+
/*
* Make sure to unblock potentially pending srd_inst_decode()
* calls in application threads after the decode() method might
di->inbuflen = inbuflen;
di->got_new_samples = TRUE;
di->handled_all_samples = FALSE;
- di->want_wait_terminate = FALSE;
/* Signal the thread that we have new data. */
g_cond_signal(&di->got_new_samples_cond);
g_cond_wait(&di->handled_all_samples_cond, &di->data_mutex);
g_mutex_unlock(&di->data_mutex);
+ if (di->want_wait_terminate)
+ return SRD_ERR_TERM_REQ;
+
return SRD_OK;
}
* calls like they would for newly constructed decoder stacks.
*
* @param di The decoder instance to call. Must not be NULL.
+ *
* @return SRD_OK upon success, a (negative) error code otherwise.
+ *
* @private
*/
SRD_PRIV int srd_inst_terminate_reset(struct srd_decoder_inst *di)
}
PyGILState_Release(gstate);
- /*
- * Pass the "restart" request to all stacked decoders.
- */
+ /* Pass the "restart" request to all stacked decoders. */
for (l = di->next_di; l; l = l->next) {
ret = srd_inst_terminate_reset(l->data);
if (ret != SRD_OK)
return ret;
}
- return SRD_OK;
+ return di->decoder_state;
}
/** @private */
struct srd_pd_output *pdo;
PyGILState_STATE gstate;
- srd_dbg("Freeing instance %s", di->inst_id);
+ srd_dbg("Freeing instance %s.", di->inst_id);
srd_inst_join_decode_thread(di);
/** @private */
SRD_PRIV void srd_inst_free_all(struct srd_session *sess)
{
- if (session_is_valid(sess) != SRD_OK) {
- srd_err("Invalid session.");
+ if (!sess)
return;
- }
g_slist_free_full(sess->di_list, (GDestroyNotify)srd_inst_free);
}
SRD_PRIV int srd_decoder_searchpath_add(const char *path);
/* session.c */
-SRD_PRIV int session_is_valid(struct srd_session *sess);
SRD_PRIV struct srd_pd_callback *srd_pd_output_callback_find(struct srd_session *sess,
int output_type);
/* instance.c */
-SRD_PRIV struct srd_decoder_inst *srd_inst_find_by_obj( const GSList *stack,
- const PyObject *obj);
SRD_PRIV int srd_inst_start(struct srd_decoder_inst *di);
SRD_PRIV void match_array_free(struct srd_decoder_inst *di);
SRD_PRIV void condition_list_free(struct srd_decoder_inst *di);
SRD_ERR_BUG = -4, /**< Errors hinting at internal bugs */
SRD_ERR_PYTHON = -5, /**< Python C API error */
SRD_ERR_DECODERS_DIR = -6, /**< Protocol decoder path invalid */
+ SRD_ERR_TERM_REQ = -7, /**< Termination requested */
/*
* Note: When adding entries here, don't forget to also update the
/** Requests termination of wait() and decode(). */
gboolean want_wait_terminate;
+ /** Indicates the current state of the decoder stack. */
+ int decoder_state;
+
GCond got_new_samples_cond;
GCond handled_all_samples_cond;
GMutex data_mutex;
const char *format, va_list args);
SRD_API int srd_log_loglevel_set(int loglevel);
SRD_API int srd_log_loglevel_get(void);
+SRD_API int srd_log_callback_get(srd_log_callback *cb, void **cb_data);
SRD_API int srd_log_callback_set(srd_log_callback cb, void *cb_data);
SRD_API int srd_log_callback_set_default(void);
return SRD_OK;
}
+/**
+ * Get the libsigrokdecode log callback routine and callback data.
+ *
+ * @param[out] cb Pointer to a function pointer to receive the log callback
+ *                function. Optional, can be NULL.
+ * @param[out] cb_data Pointer to a void pointer to receive the log callback's
+ *                     additional arguments. Optional, can be NULL.
+ *
+ * @return SRD_OK upon success.
+ *
+ * @sa srd_log_callback_set, srd_log_callback_set_default
+ *
+ * @since 0.6.0
+ */
+SRD_API int srd_log_callback_get(srd_log_callback *cb, void **cb_data)
+{
+	if (cb)
+		*cb = srd_log_cb;
+	if (cb_data)
+		*cb_data = srd_log_cb_data;
+
+	return SRD_OK;
+}
+
/**
* Set the libsigrokdecode log callback to the default built-in one.
*
/* This specific log callback doesn't need the void pointer data. */
(void)cb_data;
- /* Only output messages of at least the selected loglevel(s). */
- if (loglevel > cur_loglevel)
- return SRD_OK;
+ (void)loglevel;
if (fputs("srd: ", stderr) < 0
|| g_vfprintf(stderr, format, args) < 0
|| putc('\n', stderr) < 0)
return SRD_ERR;
+ fflush(stderr);
+
return SRD_OK;
}
int ret;
va_list args;
+ /* Only output messages of at least the selected loglevel(s). */
+ if (loglevel > cur_loglevel)
+ return SRD_OK;
+
va_start(args, format);
ret = srd_log_cb(srd_log_cb_data, loglevel, format, args);
va_end(args);
PyGILState_Release(gstate);
return mod;
+
err_out:
Py_XDECREF(mod);
srd_exception_catch("Failed to initialize module");
return NULL;
}
+
/** @endcond */
/** @endcond */
-/** @private */
-SRD_PRIV int session_is_valid(struct srd_session *sess)
-{
-
- if (!sess || sess->session_id < 1)
- return SRD_ERR;
-
- return SRD_OK;
-}
-
/**
* Create a decoding session.
*
* output callbacks.
*
* @param sess A pointer which will hold a pointer to a newly
- * initialized session on return.
+ * initialized session on return. Must not be NULL.
*
* @return SRD_OK upon success, a (negative) error code otherwise.
*
*/
SRD_API int srd_session_new(struct srd_session **sess)
{
-
- if (!sess) {
- srd_err("Invalid session pointer.");
+ if (!sess)
return SRD_ERR_ARG;
- }
*sess = g_malloc(sizeof(struct srd_session));
(*sess)->session_id = ++max_session_id;
* Decoders, instances and stack must have been prepared beforehand,
* and all SRD_CONF parameters set.
*
- * @param sess The session to start.
+ * @param sess The session to start. Must not be NULL.
*
* @return SRD_OK upon success, a (negative) error code otherwise.
*
struct srd_decoder_inst *di;
int ret;
- if (session_is_valid(sess) != SRD_OK) {
- srd_err("Invalid session pointer.");
- return SRD_ERR;
- }
+ if (!sess)
+ return SRD_ERR_ARG;
srd_dbg("Calling start() on all instances in session %d.", sess->session_id);
/**
* Set a metadata configuration key in a session.
*
- * @param sess The session to configure.
+ * @param sess The session to configure. Must not be NULL.
* @param key The configuration key (SRD_CONF_*).
* @param data The new value for the key, as a GVariant with GVariantType
* appropriate to that key. A floating reference can be passed
GSList *l;
int ret;
- if (session_is_valid(sess) != SRD_OK) {
- srd_err("Invalid session.");
+ if (!sess)
return SRD_ERR_ARG;
- }
if (!key) {
srd_err("Invalid key.");
GSList *d;
int ret;
- if (session_is_valid(sess) != SRD_OK) {
- srd_err("Invalid session.");
+ if (!sess)
return SRD_ERR_ARG;
- }
for (d = sess->di_list; d; d = d->next) {
if ((ret = srd_inst_decode(d->data, abs_start_samplenum,
* processed input data. This avoids the necessity to re-construct the
* decoder stack.
*
- * @param sess The session in which to terminate decoders.
+ * @param sess The session in which to terminate decoders. Must not be NULL.
+ *
* @return SRD_OK upon success, a (negative) error code otherwise.
*
* @since 0.5.1
GSList *d;
int ret;
- if (session_is_valid(sess) != SRD_OK) {
- srd_err("Invalid session.");
+ if (!sess)
return SRD_ERR_ARG;
- }
for (d = sess->di_list; d; d = d->next) {
ret = srd_inst_terminate_reset(d->data);
if (ret != SRD_OK)
return ret;
}
+
return SRD_OK;
}
*
* All decoder instances and output callbacks are properly released.
*
- * @param sess The session to be destroyed.
+ * @param sess The session to be destroyed. Must not be NULL.
*
* @return SRD_OK upon success, a (negative) error code otherwise.
*
{
int session_id;
- if (!sess) {
- srd_err("Invalid session.");
+ if (!sess)
return SRD_ERR_ARG;
- }
session_id = sess->session_id;
if (sess->di_list)
* stack).
*
* @param sess The output session in which to register the callback.
+ * Must not be NULL.
* @param output_type The output type this callback will receive. Only one
* callback per output type can be registered.
* @param cb The function to call. Must not be NULL.
{
struct srd_pd_callback *pd_cb;
- if (session_is_valid(sess) != SRD_OK) {
- srd_err("Invalid session.");
+ if (!sess)
return SRD_ERR_ARG;
- }
srd_dbg("Registering new callback for output type %d.", output_type);
GSList *l;
struct srd_pd_callback *tmp, *pd_cb;
- if (session_is_valid(sess) != SRD_OK) {
- srd_err("Invalid session.");
+ if (!sess)
return NULL;
- }
pd_cb = NULL;
for (l = sess->callbacks; l; l = l->next) {
if (g_file_test(decdir, G_FILE_TEST_IS_DIR))
ret = srd_decoder_searchpath_add(decdir);
else
- ret = SRD_OK; /* just ignore non-existing directory */
+ ret = SRD_OK; /* Just ignore non-existing directory. */
g_free(decdir);
return SRD_ERR_PYTHON;
}
-SRD_API GSList *srd_searchpaths_get(void)
-{
- return g_slist_copy_deep(searchpaths, (GCopyFunc)g_strdup, NULL);
-}
-
/**
* Initialize libsigrokdecode.
*
/* Locations relative to the XDG system data directories. */
sys_datadirs = g_get_system_data_dirs();
for (i = g_strv_length((char **)sys_datadirs); i > 0; i--) {
- ret = searchpath_add_xdg_dir(sys_datadirs[i-1]);
+ ret = searchpath_add_xdg_dir(sys_datadirs[i - 1]);
if (ret != SRD_OK) {
Py_Finalize();
return ret;
{
srd_dbg("Exiting libsigrokdecode.");
- g_slist_foreach(sessions, (GFunc)srd_session_destroy, NULL);
+ for (GSList *l = sessions; l; l = l->next)
+ srd_session_destroy(l->data);
srd_decoder_unload_all();
g_slist_free_full(searchpaths, g_free);
return SRD_ERR_PYTHON;
}
+/**
+ * Return the list of protocol decoder search paths.
+ *
+ * @return The list of search paths used when loading protocol decoders.
+ *         This is a deep copy; the caller owns the list and its strings
+ *         and should release them, e.g. via g_slist_free_full(list, g_free).
+ *
+ * @since 0.5.1
+ */
+SRD_API GSList *srd_searchpaths_get(void)
+{
+	return g_slist_copy_deep(searchpaths, (GCopyFunc)g_strdup, NULL);
+}
+
/** @} */
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
+import errno
import os
import sys
from shutil import copy
try:
os.mkdir(pd_dst)
except OSError as e:
- if e.errno != os.errno.EEXIST:
+ if e.errno != errno.EEXIST:
raise
else:
pass
#include "libsigrokdecode.h"
#include <inttypes.h>
+/** @cond PRIVATE */
+extern SRD_PRIV GSList *sessions;
+/** @endcond */
+
typedef struct {
PyObject_HEAD
} srd_Decoder;
-/* This is only used for nicer srd_dbg() output.
- */
+/* This is only used for nicer srd_dbg() output. */
static const char *output_type_name(unsigned int idx)
{
static const char names[][16] = {
"OUTPUT_META",
"(invalid)"
};
+
return names[MIN(idx, G_N_ELEMENTS(names) - 1)];
}
+/*
+ * Free the annotation string vector filled in by convert_annotation().
+ * Safe to call with NULL or with an entry whose ann_text was never set.
+ */
+static void release_annotation(struct srd_proto_data_annotation *pda)
+{
+	if (!pda)
+		return;
+	if (pda->ann_text)
+		g_strfreev(pda->ann_text);
+}
+
static int convert_annotation(struct srd_decoder_inst *di, PyObject *obj,
struct srd_proto_data *pdata)
{
goto err;
}
- pda = g_malloc(sizeof(struct srd_proto_data_annotation));
+ pda = pdata->data;
pda->ann_class = ann_class;
pda->ann_text = ann_text;
- pdata->data = pda;
PyGILState_Release(gstate);
return SRD_ERR_PYTHON;
}
+/*
+ * Free the payload buffer allocated by convert_binary().
+ * Safe to call with NULL.
+ */
+static void release_binary(struct srd_proto_data_binary *pdb)
+{
+	if (!pdb)
+		return;
+	/* Cast away const: the buffer was heap-allocated by convert_binary(). */
+	g_free((void *)pdb->data);
+}
+
static int convert_binary(struct srd_decoder_inst *di, PyObject *obj,
struct srd_proto_data *pdata)
{
goto err;
}
- pdb = g_malloc(sizeof(struct srd_proto_data_binary));
if (PyBytes_AsStringAndSize(py_tmp, &buf, &size) == -1)
goto err;
PyGILState_Release(gstate);
+ pdb = pdata->data;
pdb->bin_class = bin_class;
pdb->size = size;
if (!(pdb->data = g_try_malloc(pdb->size)))
return SRD_ERR_MALLOC;
memcpy((void *)pdb->data, (const void *)buf, pdb->size);
- pdata->data = pdb;
return SRD_OK;
return SRD_ERR_PYTHON;
}
+/*
+ * Find the decoder instance owning Python object 'obj' within one session.
+ * Searches the given 'stack' when non-NULL, otherwise the session's
+ * top-level instance list, and recurses into stacked decoders (next_di).
+ * Returns NULL when 'sess' is NULL or when no match is found.
+ */
+static inline struct srd_decoder_inst *srd_sess_inst_find_by_obj(
+	struct srd_session *sess, const GSList *stack, const PyObject *obj)
+{
+	const GSList *l;
+	struct srd_decoder_inst *tmp, *di;
+
+	if (!sess)
+		return NULL;
+
+	di = NULL;
+	for (l = stack ? stack : sess->di_list; di == NULL && l != NULL; l = l->next) {
+		tmp = l->data;
+		if (tmp->py_inst == obj)
+			di = tmp;
+		else if (tmp->next_di)
+			di = srd_sess_inst_find_by_obj(sess, tmp->next_di, obj);
+	}
+
+	return di;
+}
+
+/**
+ * Find a decoder instance by its Python object.
+ *
+ * I.e. find that instance's instantiation of the sigrokdecode.Decoder class.
+ * This will recurse to find the instance anywhere in the stack tree of all
+ * sessions.
+ *
+ * @param stack Pointer to a GSList of struct srd_decoder_inst, indicating the
+ * stack to search. To start searching at the bottom level of
+ * decoder instances, pass NULL.
+ * @param obj The Python class instantiation.
+ *
+ * @return Pointer to struct srd_decoder_inst, or NULL if not found.
+ *
+ * @since 0.1.0
+ */
+static inline struct srd_decoder_inst *srd_inst_find_by_obj(
+	const GSList *stack, const PyObject *obj)
+{
+	struct srd_decoder_inst *di;
+	struct srd_session *sess;
+	GSList *l;
+
+	/*
+	 * Performance shortcut: Check the first instance of the first
+	 * session, which is by far the most common case. Guard against
+	 * empty session or instance lists to avoid NULL dereferences.
+	 */
+	if (sessions) {
+		sess = sessions->data;
+		if (sess && sess->di_list) {
+			di = sess->di_list->data;
+			if (di && di->py_inst == obj)
+				return di;
+		}
+	}
+
+	/* Fall back to a full recursive search across all sessions. */
+	di = NULL;
+	for (l = sessions; di == NULL && l != NULL; l = l->next) {
+		sess = l->data;
+		di = srd_sess_inst_find_by_obj(sess, stack, obj);
+	}
+
+	return di;
+}
+
static int convert_meta(struct srd_proto_data *pdata, PyObject *obj)
{
long long intvalue;
gstate = PyGILState_Ensure();
- if (pdata->pdo->meta_type == G_VARIANT_TYPE_INT64) {
+ if (g_variant_type_equal(pdata->pdo->meta_type, G_VARIANT_TYPE_INT64)) {
if (!PyLong_Check(obj)) {
PyErr_Format(PyExc_TypeError, "This output was registered "
"as 'int', but something else was passed.");
if (PyErr_Occurred())
goto err;
pdata->data = g_variant_new_int64(intvalue);
- } else if (pdata->pdo->meta_type == G_VARIANT_TYPE_DOUBLE) {
+ } else if (g_variant_type_equal(pdata->pdo->meta_type, G_VARIANT_TYPE_DOUBLE)) {
if (!PyFloat_Check(obj)) {
PyErr_Format(PyExc_TypeError, "This output was registered "
"as 'float', but something else was passed.");
return SRD_ERR_PYTHON;
}
+/*
+ * Drop the reference to the GVariant created by convert_meta().
+ * Safe to call with NULL.
+ */
+static void release_meta(GVariant *gvar)
+{
+	if (!gvar)
+		return;
+	g_variant_unref(gvar);
+}
+
static PyObject *Decoder_put(PyObject *self, PyObject *args)
{
GSList *l;
struct srd_decoder_inst *di, *next_di;
struct srd_pd_output *pdo;
struct srd_proto_data pdata;
+ struct srd_proto_data_annotation pda;
+ struct srd_proto_data_binary pdb;
uint64_t start_sample, end_sample;
int output_id;
struct srd_pd_callback *cb;
PyGILState_STATE gstate;
+ py_data = NULL;
+
gstate = PyGILState_Ensure();
if (!(di = srd_inst_find_by_obj(NULL, self))) {
case SRD_OUTPUT_ANN:
/* Annotations are only fed to callbacks. */
if ((cb = srd_pd_output_callback_find(di->sess, pdo->output_type))) {
+ pdata.data = &pda;
/* Convert from PyDict to srd_proto_data_annotation. */
if (convert_annotation(di, py_data, &pdata) != SRD_OK) {
/* An error was already logged. */
Py_BEGIN_ALLOW_THREADS
cb->cb(&pdata, cb->cb_data);
Py_END_ALLOW_THREADS
+ release_annotation(pdata.data);
}
break;
case SRD_OUTPUT_PYTHON:
Py_XDECREF(py_res);
}
if ((cb = srd_pd_output_callback_find(di->sess, pdo->output_type))) {
- /* Frontends aren't really supposed to get Python
- * callbacks, but it's useful for testing. */
+ /*
+ * Frontends aren't really supposed to get Python
+ * callbacks, but it's useful for testing.
+ */
pdata.data = py_data;
cb->cb(&pdata, cb->cb_data);
}
break;
case SRD_OUTPUT_BINARY:
if ((cb = srd_pd_output_callback_find(di->sess, pdo->output_type))) {
+ pdata.data = &pdb;
/* Convert from PyDict to srd_proto_data_binary. */
if (convert_binary(di, py_data, &pdata) != SRD_OK) {
/* An error was already logged. */
Py_BEGIN_ALLOW_THREADS
cb->cb(&pdata, cb->cb_data);
Py_END_ALLOW_THREADS
+ release_binary(pdata.data);
}
break;
case SRD_OUTPUT_META:
Py_BEGIN_ALLOW_THREADS
cb->cb(&pdata, cb->cb_data);
Py_END_ALLOW_THREADS
+ release_meta(pdata.data);
}
break;
default:
const GVariantType *meta_type_gv;
int output_type;
char *proto_id, *meta_name, *meta_descr;
- char *keywords[] = {"output_type", "proto_id", "meta", NULL};
+ char *keywords[] = { "output_type", "proto_id", "meta", NULL };
PyGILState_STATE gstate;
gboolean is_meta;
GSList *l;
goto err;
}
- /* Default to instance id, which defaults to class id. */
+ /* Default to instance ID, which defaults to class ID. */
proto_id = di->inst_id;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|s(Oss)", keywords,
&output_type, &proto_id,
}
}
- srd_dbg("Instance %s checking registration type %d for %s.",
- di->inst_id, output_type, proto_id);
pdo = NULL;
for (l = di->pd_output; l; l = l->next) {
cmp = l->data;
return SRD_TERM_EITHER_EDGE;
case 'n':
return SRD_TERM_NO_EDGE;
+ default:
+ return -1;
}
return -1;
PyObject *py_pinvalues;
PyGILState_STATE gstate;
- gstate = PyGILState_Ensure();
-
if (!di) {
srd_err("Invalid decoder instance.");
- PyGILState_Release(gstate);
return NULL;
}
+ gstate = PyGILState_Ensure();
+
py_pinvalues = PyTuple_New(di->dec_num_channels);
for (i = 0; i < di->dec_num_channels; i++) {
srd_err("Failed to get the value.");
goto err;
}
- term = g_malloc0(sizeof(struct srd_term));
+ term = g_malloc(sizeof(struct srd_term));
term->type = get_term_type(term_str);
term->channel = PyLong_AsLong(py_key);
g_free(term_str);
srd_err("Failed to get number of samples to skip.");
goto err;
}
- term = g_malloc0(sizeof(struct srd_term));
+ term = g_malloc(sizeof(struct srd_term));
term->type = SRD_TERM_SKIP;
term->num_samples_to_skip = num_samples_to_skip;
term->num_samples_already_skipped = 0;
GSList *term_list;
condition_list_free(di);
- term = g_malloc0(sizeof(*term));
+ term = g_malloc(sizeof(*term));
term->type = SRD_TERM_SKIP;
term->num_samples_to_skip = count;
term->num_samples_already_skipped = 0;
* while the termination request still gets signalled.
*/
found_match = FALSE;
- ret = process_samples_until_condition_match(di, &found_match);
+
+ /* Ignore return value for now, should never be negative. */
+ (void)process_samples_until_condition_match(di, &found_match);
Py_END_ALLOW_THREADS
}
static PyMethodDef Decoder_methods[] = {
- {"put", Decoder_put, METH_VARARGS,
- "Accepts a dictionary with the following keys: startsample, endsample, data"},
- {"register", (PyCFunction)Decoder_register, METH_VARARGS|METH_KEYWORDS,
- "Register a new output stream"},
- {"wait", Decoder_wait, METH_VARARGS,
- "Wait for one or more conditions to occur"},
- {"has_channel", Decoder_has_channel, METH_VARARGS,
- "Report whether a channel was supplied"},
+ { "put", Decoder_put, METH_VARARGS,
+ "Accepts a dictionary with the following keys: startsample, endsample, data" },
+ { "register", (PyCFunction)Decoder_register, METH_VARARGS|METH_KEYWORDS,
+ "Register a new output stream" },
+ { "wait", Decoder_wait, METH_VARARGS,
+ "Wait for one or more conditions to occur" },
+ { "has_channel", Decoder_has_channel, METH_VARARGS,
+ "Report whether a channel was supplied" },
{NULL, NULL, 0, NULL}
};
goto err;
}
+ PyGILState_Release(gstate);
+
return py_str_as_str(py_value, outstr);
err:
}
*out_strv = strv;
+ PyGILState_Release(gstate);
+
return SRD_OK;
err_out:
}
if (!var)
srd_exception_catch("Failed to extract string value");
-
} else if (PyLong_Check(py_obj)) { /* integer */
int64_t val;
var = g_variant_new_int64(val);
else
srd_exception_catch("Failed to extract integer value");
-
} else if (PyFloat_Check(py_obj)) { /* float */
double val;
var = g_variant_new_double(val);
else
srd_exception_catch("Failed to extract float value");
-
} else {
srd_err("Failed to extract value of unsupported type.");
}