mirror of
https://github.com/wagiminator/ATtiny814-USB-PD-Adapter.git
synced 2026-02-04 05:58:05 +03:00
Initial commit
This commit is contained in:
608
software/tools/pymcuprog/libs/appdirs.py
Normal file
608
software/tools/pymcuprog/libs/appdirs.py
Normal file
@@ -0,0 +1,608 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2005-2010 ActiveState Software Inc.
|
||||
# Copyright (c) 2013 Eddy Petrișor
|
||||
|
||||
"""Utilities for determining application-specific dirs.
|
||||
|
||||
See <http://github.com/ActiveState/appdirs> for details and usage.
|
||||
"""
|
||||
# Dev Notes:
|
||||
# - MSDN on where to store app data files:
|
||||
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
|
||||
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
|
||||
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
|
||||
|
||||
__version__ = "1.4.4"
|
||||
__version_info__ = tuple(int(segment) for segment in __version__.split("."))
|
||||
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
PY3 = sys.version_info[0] == 3
|
||||
|
||||
if PY3:
|
||||
unicode = str
|
||||
|
||||
if sys.platform.startswith('java'):
|
||||
import platform
|
||||
os_name = platform.java_ver()[3][0]
|
||||
if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
|
||||
system = 'win32'
|
||||
elif os_name.startswith('Mac'): # "Mac OS X", etc.
|
||||
system = 'darwin'
|
||||
else: # "Linux", "SunOS", "FreeBSD", etc.
|
||||
# Setting this to "linux2" is not ideal, but only Windows or Mac
|
||||
# are actually checked for and the rest of the module expects
|
||||
# *sys.platform* style strings.
|
||||
system = 'linux2'
|
||||
else:
|
||||
system = sys.platform
|
||||
|
||||
|
||||
|
||||
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return the full path of the user-specific data directory.

    "appname": name of the application; when None only the base system
        directory is returned.
    "appauthor": (Windows only) author or distributing body, typically
        the owning company name. Defaults to appname; pass False to
        omit the author path element entirely.
    "version": optional version path element (typically
        "<major>.<minor>") so multiple versions can run independently.
        Only applied when appname is given.
    "roaming": (Windows only) when True use the roaming AppData
        directory, which is synced on login for roaming profiles. See
        <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>.

    Typical user data directories:
        Mac OS X:             ~/Library/Application Support/<AppName>
        Unix:                 ~/.local/share/<AppName>   # or $XDG_DATA_HOME
        Win 7 (not roaming):  C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
        Win 7 (roaming):      C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        folder_id = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
        path = os.path.normpath(_get_win_folder(folder_id))
        if appname:
            if appauthor is False:
                path = os.path.join(path, appname)
            else:
                path = os.path.join(path, appauthor, appname)
    elif system == 'darwin':
        path = os.path.expanduser('~/Library/Application Support/')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG spec: $XDG_DATA_HOME, defaulting to ~/.local/share.
        path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
|
||||
|
||||
|
||||
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return the full path of the user-shared (site) data directory.

    "appname": name of the application; when None only the base system
        directory is returned.
    "appauthor": (Windows only) author or distributing body. Defaults
        to appname; pass False to omit the author path element.
    "version": optional version path element; only applied when
        appname is given.
    "multipath": (*nix only) when True return the entire
        os.pathsep-joined list of data dirs; otherwise just the first
        entry of $XDG_DATA_DIRS (or '/usr/local/share' if unset).

    Typical site data directories:
        Mac OS X:  /Library/Application Support/<AppName>
        Unix:      /usr/local/share/<AppName> or /usr/share/<AppName>
        Win 7:     C:\ProgramData\<AppAuthor>\<AppName>  # hidden but writeable

    WARNING: Do not use this on Windows — "C:\ProgramData" is a hidden
    *system* directory on Vista.
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        if appname:
            if appauthor is False:
                path = os.path.join(path, appname)
            else:
                path = os.path.join(path, appauthor, appname)
    elif system == 'darwin':
        path = os.path.expanduser('/Library/Application Support')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG default for $XDG_DATA_DIRS; only the first entry is used
        # unless multipath was requested.
        raw = os.getenv('XDG_DATA_DIRS',
                        os.pathsep.join(['/usr/local/share', '/usr/share']))
        candidates = [os.path.expanduser(entry.rstrip(os.sep))
                      for entry in raw.split(os.pathsep)]
        if appname:
            if version:
                appname = os.path.join(appname, version)
            candidates = [os.sep.join([entry, appname]) for entry in candidates]
        return os.pathsep.join(candidates) if multipath else candidates[0]

    # Windows/macOS only: the *nix branch returned above.
    if appname and version:
        path = os.path.join(path, version)
    return path
|
||||
|
||||
|
||||
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return the full path of the user-specific config directory.

    "appname": name of the application; when None only the base system
        directory is returned.
    "appauthor": (Windows only) author or distributing body. Defaults
        to appname; pass False to omit the author path element.
    "version": optional version path element; only applied when
        appname is given.
    "roaming": (Windows only) when True use the roaming AppData
        directory, synced on login for roaming profiles.

    Typical user config directories:
        Mac OS X:  same as user_data_dir
        Unix:      ~/.config/<AppName>   # or $XDG_CONFIG_HOME, if defined
        Win *:     same as user_data_dir
    """
    unixlike = system not in ("win32", "darwin")
    if unixlike:
        # XDG spec: $XDG_CONFIG_HOME, defaulting to ~/.config.
        base = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
        path = os.path.join(base, appname) if appname else base
    else:
        path = user_data_dir(appname, appauthor, None, roaming)
    if appname and version:
        path = os.path.join(path, version)
    return path
|
||||
|
||||
|
||||
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return the full path of the user-shared (site) config directory.

    "appname": name of the application; when None only the base system
        directory is returned.
    "appauthor": (Windows only) author or distributing body. Defaults
        to appname; pass False to omit the author path element.
    "version": optional version path element; only applied when
        appname is given.
    "multipath": (*nix only) when True return the entire
        os.pathsep-joined list of config dirs; otherwise just the
        first entry of $XDG_CONFIG_DIRS (or '/etc/xdg' if unset).

    Typical site config directories:
        Mac OS X:  same as site_data_dir
        Unix:      /etc/xdg/<AppName> or each $XDG_CONFIG_DIRS entry
        Win *:     same as site_data_dir

    WARNING: Do not use this on Windows — "C:\ProgramData" is a hidden
    *system* directory on Vista.
    """
    if system in ("win32", "darwin"):
        path = site_data_dir(appname, appauthor)
        if appname and version:
            path = os.path.join(path, version)
        return path

    # XDG default for $XDG_CONFIG_DIRS; only the first entry is used
    # unless multipath was requested.
    raw = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
    candidates = [os.path.expanduser(entry.rstrip(os.sep))
                  for entry in raw.split(os.pathsep)]
    if appname:
        if version:
            appname = os.path.join(appname, version)
        candidates = [os.sep.join([entry, appname]) for entry in candidates]
    return os.pathsep.join(candidates) if multipath else candidates[0]
|
||||
|
||||
|
||||
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return the full path of the user-specific cache directory.

    "appname": name of the application; when None only the base system
        directory is returned.
    "appauthor": (Windows only) author or distributing body. Defaults
        to appname; pass False to omit the author path element.
    "version": optional version path element; only applied when
        appname is given.
    "opinion": when False, skip appending "Cache" to the base Windows
        app-data directory (MSDN has no dedicated cache location, so
        by default we follow the common ...\<AppName>\Cache pattern).

    Typical user cache directories:
        Mac OS X:  ~/Library/Caches/<AppName>
        Unix:      ~/.cache/<AppName>   # XDG default
        Vista:     C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        if appname:
            if appauthor is False:
                path = os.path.join(path, appname)
            else:
                path = os.path.join(path, appauthor, appname)
            if opinion:
                path = os.path.join(path, "Cache")
    elif system == 'darwin':
        path = os.path.expanduser('~/Library/Caches')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG spec: $XDG_CACHE_HOME, defaulting to ~/.cache.
        path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
|
||||
|
||||
|
||||
def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return the full path of the user-specific state directory.

    "appname": name of the application; when None only the base system
        directory is returned.
    "appauthor": (Windows only) author or distributing body. Defaults
        to appname; pass False to omit the author path element.
    "version": optional version path element; only applied when
        appname is given.
    "roaming": (Windows only) when True use the roaming AppData
        directory, synced on login for roaming profiles.

    Typical user state directories:
        Mac OS X:  same as user_data_dir
        Unix:      ~/.local/state/<AppName>   # or $XDG_STATE_HOME, if defined
        Win *:     same as user_data_dir

    On Unix this follows the Debian proposal
    <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
    extending the XDG spec with $XDG_STATE_HOME.
    """
    if system not in ("win32", "darwin"):
        path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
        if appname:
            path = os.path.join(path, appname)
    else:
        path = user_data_dir(appname, appauthor, None, roaming)
    if appname and version:
        path = os.path.join(path, version)
    return path
|
||||
|
||||
|
||||
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return the full path of the user-specific log directory.

    "appname": name of the application; when None only the base system
        directory is returned.
    "appauthor": (Windows only) author or distributing body. Defaults
        to appname; pass False to omit the author path element.
    "version": optional version path element; only applied when
        appname is given.
    "opinion": when False, skip appending "Logs" (Windows) / "log"
        (Unix) to the base directory. MSDN suggests nothing beyond
        CSIDL_LOCAL_APPDATA for logs, so by default a "Logs"/"log"
        suffix is added as an opinionated convention.

    Typical user log directories:
        Mac OS X:  ~/Library/Logs/<AppName>
        Unix:      ~/.cache/<AppName>/log   # or under $XDG_CACHE_HOME
        Vista:     C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
    """
    if system == "darwin":
        path = os.path.expanduser('~/Library/Logs')
        # Bug fix: the original unconditionally joined appname, which
        # raised TypeError for appname=None; every other platform
        # treats appname as optional, so do the same here.
        if appname:
            path = os.path.join(path, appname)
    elif system == "win32":
        path = user_data_dir(appname, appauthor, version)
        version = False  # already applied by user_data_dir above
        if opinion:
            path = os.path.join(path, "Logs")
    else:
        path = user_cache_dir(appname, appauthor, version)
        version = False  # already applied by user_cache_dir above
        if opinion:
            path = os.path.join(path, "log")
    if appname and version:
        path = os.path.join(path, version)
    return path
|
||||
|
||||
|
||||
class AppDirs(object):
    """Convenience wrapper bundling the module-level *_dir functions.

    The constructor arguments are stored once and each directory kind
    is exposed as a read-only property that forwards to the matching
    module-level function.
    """

    def __init__(self, appname=None, appauthor=None, version=None,
                 roaming=False, multipath=False):
        self.appname = appname
        self.appauthor = appauthor
        self.version = version
        self.roaming = roaming    # Windows only
        self.multipath = multipath  # *nix site dirs only

    @property
    def user_data_dir(self):
        return user_data_dir(self.appname, self.appauthor,
                             self.version, self.roaming)

    @property
    def site_data_dir(self):
        return site_data_dir(self.appname, self.appauthor,
                             self.version, self.multipath)

    @property
    def user_config_dir(self):
        return user_config_dir(self.appname, self.appauthor,
                               self.version, self.roaming)

    @property
    def site_config_dir(self):
        return site_config_dir(self.appname, self.appauthor,
                               self.version, self.multipath)

    @property
    def user_cache_dir(self):
        return user_cache_dir(self.appname, self.appauthor, self.version)

    @property
    def user_state_dir(self):
        return user_state_dir(self.appname, self.appauthor, self.version)

    @property
    def user_log_dir(self):
        return user_log_dir(self.appname, self.appauthor, self.version)
|
||||
|
||||
|
||||
#---- internal support stuff
|
||||
|
||||
def _get_win_folder_from_registry(csidl_name):
    """Resolve a CSIDL folder by reading the Shell Folders registry key.

    This is a fallback technique at best: it is not certain the
    registry holds the correct answer for all CSIDL_* names.
    """
    if PY3:
        import winreg as _winreg
    else:
        import _winreg

    # Map the CSIDL constant name to its registry value name.
    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]

    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    directory, _value_type = _winreg.QueryValueEx(key, shell_folder_name)
    return directory
|
||||
|
||||
|
||||
def _get_win_folder_with_pywin32(csidl_name):
    """Resolve a CSIDL folder via the pywin32 shell bindings."""
    from win32com.shell import shellcon, shell
    folder = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
    # SHGetFolderPath does not return unicode strings when there is
    # unicode data in the path, so try to coerce it ourselves.
    try:
        folder = unicode(folder)

        # Downgrade to the short (8.3) path name if it contains
        # high-bit characters. See
        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
        if any(ord(ch) > 255 for ch in folder):
            try:
                import win32api
                folder = win32api.GetShortPathName(folder)
            except ImportError:
                pass
    except UnicodeError:
        pass
    return folder
|
||||
|
||||
|
||||
def _get_win_folder_with_ctypes(csidl_name):
    """Resolve a CSIDL folder by calling SHGetFolderPathW via ctypes."""
    import ctypes

    # Numeric CSIDL values for the folders this module needs.
    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]

    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)

    # Downgrade to the short (8.3) path name if it contains high-bit
    # characters. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    if any(ord(ch) > 255 for ch in buf):
        short_buf = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, short_buf, 1024):
            buf = short_buf

    return buf.value
|
||||
|
||||
def _get_win_folder_with_jna(csidl_name):
    """Resolve a CSIDL folder via JNA (for Jython running on Windows)."""
    import array
    from com.sun import jna
    from com.sun.jna.platform import win32

    buf_size = win32.WinDef.MAX_PATH * 2
    buf = array.zeros('c', buf_size)
    win32.Shell32.INSTANCE.SHGetFolderPath(
        None, getattr(win32.ShlObj, csidl_name), None,
        win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
    folder = jna.Native.toString(buf.tostring()).rstrip("\0")

    # Downgrade to the short (8.3) path name if it contains high-bit
    # characters. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    if any(ord(ch) > 255 for ch in folder):
        buf = array.zeros('c', buf_size)
        if win32.Kernel32.INSTANCE.GetShortPathName(folder, buf, buf_size):
            folder = jna.Native.toString(buf.tostring()).rstrip("\0")

    return folder
|
||||
|
||||
if system == "win32":
    # Pick the best available backend for resolving CSIDL folders, in
    # order of preference: pywin32, then ctypes, then JNA (Jython),
    # and finally the registry as a last resort.
    try:
        import win32com.shell
        _get_win_folder = _get_win_folder_with_pywin32
    except ImportError:
        try:
            from ctypes import windll
            _get_win_folder = _get_win_folder_with_ctypes
        except ImportError:
            try:
                import com.sun.jna
                _get_win_folder = _get_win_folder_with_jna
            except ImportError:
                # No native bindings available: fall back to reading
                # the Shell Folders registry key.
                _get_win_folder = _get_win_folder_from_registry
|
||||
|
||||
|
||||
#---- self test code
|
||||
|
||||
if __name__ == "__main__":
    # Smoke test: print every directory kind for a few argument combos.
    appname = "MyApp"
    appauthor = "MyCompany"

    props = ("user_data_dir",
             "user_config_dir",
             "user_cache_dir",
             "user_state_dir",
             "user_log_dir",
             "site_data_dir",
             "site_config_dir")

    def _dump(dirs):
        # Print each property of one AppDirs instance.
        for prop in props:
            print("%s: %s" % (prop, getattr(dirs, prop)))

    print("-- app dirs %s --" % __version__)

    print("-- app dirs (with optional 'version')")
    _dump(AppDirs(appname, appauthor, version="1.0"))

    print("\n-- app dirs (without optional 'version')")
    _dump(AppDirs(appname, appauthor))

    print("\n-- app dirs (without optional 'appauthor')")
    _dump(AppDirs(appname))

    print("\n-- app dirs (with disabled 'appauthor')")
    _dump(AppDirs(appname, appauthor=False))
|
||||
1372
software/tools/pymcuprog/libs/intelhex/__init__.py
Normal file
1372
software/tools/pymcuprog/libs/intelhex/__init__.py
Normal file
File diff suppressed because it is too large
Load Diff
160
software/tools/pymcuprog/libs/intelhex/compat.py
Normal file
160
software/tools/pymcuprog/libs/intelhex/compat.py
Normal file
@@ -0,0 +1,160 @@
|
||||
# Copyright (c) 2011, Bernhard Leiner
|
||||
# Copyright (c) 2013-2018 Alexander Belchenko
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms,
|
||||
# with or without modification, are permitted provided
|
||||
# that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain
|
||||
# the above copyright notice, this list of conditions
|
||||
# and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce
|
||||
# the above copyright notice, this list of conditions
|
||||
# and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
# * Neither the name of the author nor the names
|
||||
# of its contributors may be used to endorse
|
||||
# or promote products derived from this software
|
||||
# without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
|
||||
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
|
||||
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
# IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
|
||||
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||||
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
|
||||
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
|
||||
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
'''Compatibility functions for python 2 and 3.
|
||||
|
||||
@author Bernhard Leiner (bleiner AT gmail com)
|
||||
@author Alexander Belchenko (alexander belchenko AT gmail com)
|
||||
'''
|
||||
|
||||
__docformat__ = "javadoc"
|
||||
|
||||
|
||||
import sys, array
|
||||
|
||||
|
||||
if sys.version_info[0] >= 3:
    # Python 3
    Python = 3  # major-version flag for callers that need to branch

    def asbytes(s):
        # Coerce str to bytes via latin1 (a 1:1 byte<->codepoint
        # mapping); bytes pass through unchanged.
        if isinstance(s, bytes):
            return s
        return s.encode('latin1')
    def asstr(s):
        # Coerce bytes to str via latin1; str passes through unchanged.
        if isinstance(s, str):
            return s
        return s.decode('latin1')

    # for python >= 3.2 use 'tobytes', otherwise 'tostring'
    array_tobytes = array.array.tobytes if sys.version_info[1] >= 2 else array.array.tostring

    # Type tuples/aliases that differ between Python 2 and 3.
    IntTypes = (int,)
    StrType = str
    UnicodeType = str

    range_g = range # range generator
    def range_l(*args): # range list
        return list(range(*args))

    def dict_keys(dikt): # dict keys list
        return list(dikt.keys())
    def dict_keys_g(dikt): # dict keys generator
        return dikt.keys()
    def dict_items_g(dikt): # dict items generator
        return dikt.items()

    from io import StringIO, BytesIO

    def get_binary_stdout():
        # Python 3 text streams always expose the raw binary buffer.
        return sys.stdout.buffer

    def get_binary_stdin():
        return sys.stdin.buffer

else:
    # Python 2
    Python = 2

    # On Python 2 str already is a byte string, so both coercions are
    # just str().
    asbytes = str
    asstr = str

    array_tobytes = array.array.tostring

    IntTypes = (int, long)
    StrType = basestring
    UnicodeType = unicode

    #range_g = xrange # range generator
    def range_g(*args):
        # we want to use xrange here but on python 2 it does not work with long ints
        try:
            return xrange(*args)
        except OverflowError:
            # Fall back to a hand-rolled generator that mimics
            # range(start, stop, step) but supports long ints.
            start = 0
            stop = 0
            step = 1
            n = len(args)
            if n == 1:
                stop = args[0]
            elif n == 2:
                start, stop = args
            elif n == 3:
                start, stop, step = args
            else:
                raise TypeError('wrong number of arguments in range_g call!')
            if step == 0:
                raise ValueError('step cannot be zero')
            if step > 0:
                def up(start, stop, step):
                    while start < stop:
                        yield start
                        start += step
                return up(start, stop, step)
            else:
                def down(start, stop, step):
                    while start > stop:
                        yield start
                        start += step
                return down(start, stop, step)

    range_l = range # range list

    def dict_keys(dikt): # dict keys list
        return dikt.keys()
    def dict_keys_g(dikt): # dict keys generator
        return dikt.keys()
    def dict_items_g(dikt): # dict items generator
        return dikt.items()

    from cStringIO import StringIO
    BytesIO = StringIO

    import os
    def _force_stream_binary(stream):
        """Force binary mode for stream on Windows."""
        if os.name == 'nt':
            f_fileno = getattr(stream, 'fileno', None)
            if f_fileno:
                fileno = f_fileno()
                if fileno >= 0:
                    # msvcrt is only needed (and only exists) on Windows.
                    import msvcrt
                    msvcrt.setmode(fileno, os.O_BINARY)
        return stream

    def get_binary_stdout():
        return _force_stream_binary(sys.stdout)

    def get_binary_stdin():
        return _force_stream_binary(sys.stdin)
|
||||
64
software/tools/pymcuprog/libs/intelhex/getsizeof.py
Normal file
64
software/tools/pymcuprog/libs/intelhex/getsizeof.py
Normal file
@@ -0,0 +1,64 @@
|
||||
# Recursive version sys.getsizeof(). Extendable with custom handlers.
|
||||
# Code from http://code.activestate.com/recipes/577504/
|
||||
# Created by Raymond Hettinger on Fri, 17 Dec 2010 (MIT)
|
||||
|
||||
import sys
|
||||
from itertools import chain
|
||||
from collections import deque
|
||||
try:
|
||||
from reprlib import repr
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
def total_size(o, handlers={}, verbose=False):
    """ Returns the approximate memory footprint an object and all of its contents.

    Automatically finds the contents of the following builtin containers and
    their subclasses: tuple, list, deque, dict, set and frozenset.
    To search other containers, add handlers to iterate over their contents:

        handlers = {SomeContainerClass: iter,
                    OtherContainerClass: OtherContainerClass.get_elements}

    """
    def iter_dict(d):
        # Walk a dict's keys and values as one flat stream.
        return chain.from_iterable(d.items())

    all_handlers = {
        tuple: iter,
        list: iter,
        deque: iter,
        dict: iter_dict,
        set: iter,
        frozenset: iter,
    }
    all_handlers.update(handlers)  # user handlers take precedence
    seen = set()                   # object ids already counted
    default_size = sys.getsizeof(0)  # estimate for objects without __sizeof__

    def sizeof(obj):
        # Skip anything already counted so shared objects count once.
        if id(obj) in seen:
            return 0
        seen.add(id(obj))
        size = sys.getsizeof(obj, default_size)

        if verbose:
            print(size, type(obj), repr(obj))

        for container_type, handler in all_handlers.items():
            if isinstance(obj, container_type):
                size += sum(map(sizeof, handler(obj)))
                break
        return size

    return sizeof(o)
|
||||
|
||||
|
||||
##### Example call #####

if __name__ == '__main__':
    # Demonstrate total_size() on a small dict; verbose mode also prints
    # the individual size of every object visited during the walk.
    print("dict 3 elements")
    sample = {0: 0xFF, 1: 0xEE, 2: 0xCC}
    print(total_size(sample, verbose=True))
|
||||
46
software/tools/pymcuprog/libs/pyedbglib/__init__.py
Normal file
46
software/tools/pymcuprog/libs/pyedbglib/__init__.py
Normal file
@@ -0,0 +1,46 @@
|
||||
"""
|
||||
Python EDBG protocol communication library
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
pyedbglib is a low-level protocol library for communicating with
|
||||
Microchip CMSIS-DAP based debuggers.
|
||||
|
||||
pyedbglib uses HIDAPI package with a USB-level driver such as libusb.
|
||||
|
||||
The protocol library has no application usage on its own, but provides
|
||||
USB-protocol-level tool drivers to applications such as pymcuprog.
|
||||
In general a two-stage stack implementation is required for using pyedbglib:
|
||||
|
||||
1. Create transport HID layer
|
||||
2. Create protocol implementation using this transport layer
|
||||
|
||||
All protocols implemented in the library generally take the transport layer
|
||||
as a parameter to their constructors.
|
||||
|
||||
To use pyedbglib as a library for applications, the following usage patterns
|
||||
can be used:
|
||||
|
||||
Import and instantiate transport object:
|
||||
|
||||
>>> from pyedbglib.hidtransport.hidtransportfactory import hid_transport
|
||||
>>> transport = hid_transport()
|
||||
|
||||
Connect to any nEDBG tool. Serial number and product are optional, but must
|
||||
be provided if more than one matching unit is connected:
|
||||
|
||||
>>> status = transport.connect(serial_number="", product="nedbg")
|
||||
|
||||
Example of application using housekeeping protocol to read out the target voltage:
|
||||
|
||||
>>> from pyedbglib.protocols.housekeepingprotocol import Jtagice3HousekeepingProtocol
|
||||
>>> housekeeper = Jtagice3HousekeepingProtocol(transport)
|
||||
>>> housekeeper.start_session()
|
||||
>>> voltage = housekeeper.get_le16(Jtagice3HousekeepingProtocol.HOUSEKEEPING_CONTEXT_ANALOG,
|
||||
Jtagice3HousekeepingProtocol.HOUSEKEEPING_ANALOG_VTREF)
|
||||
>>> voltage = voltage / 1000.0
|
||||
>>> housekeeper.end_session()
|
||||
>>> print ("Target is running at {0:.02f}V".format(voltage))
|
||||
|
||||
"""
|
||||
import logging
|
||||
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
||||
@@ -0,0 +1,165 @@
|
||||
"""Base class for all HID transport mechanisms."""
|
||||
|
||||
from logging import getLogger
|
||||
from . import toolinfo
|
||||
|
||||
|
||||
class HidTool(object):
    """
    Holds transport and DAP properties of a CMSIS-DAP debugger.

    Used to select the debugger to use if multiple debuggers are connected.
    """
    # pylint: disable=too-many-instance-attributes, too-many-arguments
    # These are primary keys used to identify the debugger.

    def __init__(self, vendor_id, product_id, serial_number, product_string="", manufacturer_string=""):
        self.logger = getLogger(__name__)
        # USB identification of the tool
        self.vendor_id = vendor_id
        self.product_id = product_id
        self.serial_number = serial_number
        self.product_string = product_string
        self.manufacturer_string = manufacturer_string
        # Interface not resolved yet
        self.interface_number = -1
        # DAP properties filled in after connecting
        self.firmware_version = ""
        self.device_vendor_id = ""
        self.device_name = ""
        # Default HID report size; adjusted per-tool via set_packet_size()
        self.packet_size = 64

    def set_packet_size(self, packet_size):
        """
        Sets the packet size

        :param packet_size: bytes per packet
        """
        self.packet_size = packet_size

    def set_product_string(self, product_string):
        """
        Sets the product string

        :param product_string: product name string
        """
        self.product_string = product_string
|
||||
|
||||
|
||||
class HidTransportBase(object):
    """Base class for HID transports"""

    def __init__(self):
        self.logger = getLogger(__name__)
        self.devices = []
        self.device = None
        # Mark as disconnected *before* detecting devices: detect_devices()
        # is provided by the sub-class and may raise, and __del__ must still
        # find a 'connected' attribute on a partially-constructed instance.
        self.connected = False
        self.detect_devices()

    def __del__(self):
        # Make sure we always disconnect the HID connection.  The getattr
        # guard protects against __init__ having raised before 'connected'
        # was assigned, since __del__ can run on a half-built object.
        if getattr(self, 'connected', False):
            self.disconnect()

    def detect_devices(self):
        """Raise error as this method needs to be overridden."""
        raise NotImplementedError("method needs to be defined by sub-class")

    def get_matching_tools(self, serial_number_substring='', product=None):
        """
        Returns a list of tools matching the given serial_number_substring and product.

        :param serial_number_substring: can be an empty string or a subset of a serial number. Not case sensitive
            This function will do matching of the last part of the devices serial numbers to
            the serial_number_substring. Examples:
            '123' will match "MCHP3252000000043123" but not "MCP32520001230000000"
            '' will match any serial number
        :param product: product type to connect to. If None any tool matching the serial_number_substring
            will be returned
        :return: List of matching tools
        """
        # Support systems which use None as the standard for an unspecified USB serial
        if serial_number_substring is None:
            serial_number_substring = ""

        # Making serial_number_substring case insensitive
        serial_number_substring = serial_number_substring.lower()

        # Support tool shortnames
        toolname_in_product_string = toolinfo.tool_shortname_to_product_string_name(product)
        if toolname_in_product_string is not None:
            # Making product name case insensitive
            toolname_in_product_string = toolname_in_product_string.lower()

        # Match on product-string prefix (if a product was given) and on
        # serial-number suffix (case-insensitive on both sides)
        matching_devices = []
        for device in self.devices:
            if toolname_in_product_string is None or device.product_string.lower().startswith(
                    toolname_in_product_string):
                if device.serial_number.lower().endswith(serial_number_substring):
                    matching_devices.append(device)

        return matching_devices

    def connect(self, serial_number=None, product=None):
        """
        Makes a HID connection to a debugger

        :param serial_number: instance serial number to connect to
        :param product: product type to connect to
        :return: True if successfully connected to a tool, False if not
        """
        # Already connected: nothing to do
        if self.connected:
            return True

        device_count = len(self.devices)
        self.logger.debug("{:d} devices available".format(device_count))
        if device_count == 0:
            self.logger.error("No CMSIS-DAP devices found.")
            return False

        matching_devices = self.get_matching_tools(serial_number_substring=serial_number, product=product)
        number_of_matching_devices = len(matching_devices)

        # Did we find exactly 1 tool?
        if number_of_matching_devices != 1:
            log_str = "Found {:d} daps matching the filter serial = \"{}\" and product = \"{}\""
            self.logger.debug(log_str.format(number_of_matching_devices, serial_number, product))
            if number_of_matching_devices > 1:
                self.logger.error("Too many products found. Please specify one of:")
                for device in self.devices:
                    self.logger.error(" > {:s} {:s}".format(device.product_string,
                                                            device.serial_number))
            return False

        # Everything is peachy, connect to the tool
        self.device = matching_devices[0]
        self.hid_connect(self.device)
        self.logger.debug("Connected OK")
        self.connected = True
        # Configure the report size according to the tool's known default
        packet_size = toolinfo.get_default_report_size(self.device.product_id)
        self.device.set_packet_size(packet_size)
        self.hid_info()
        return True

    def disconnect(self):
        """Release the HID connection"""
        if self.connected:
            self.hid_disconnect()
        self.connected = False

    def hid_connect(self, device):
        """Raise error as this method needs to be overridden."""
        raise NotImplementedError("method needs to be defined by sub-class")

    def hid_info(self):
        """Raise error as this method needs to be overridden."""
        raise NotImplementedError("method needs to be defined by sub-class")

    def hid_disconnect(self):
        """Raise error as this method needs to be overridden."""
        raise NotImplementedError("method needs to be defined by sub-class")

    def get_report_size(self):
        """
        Get the packet size in bytes

        :return: bytes per packet/report
        """
        return self.device.packet_size
|
||||
@@ -0,0 +1,56 @@
|
||||
"""
|
||||
Factory for HID transport connections.
|
||||
|
||||
Currently supports only Cython/HIDAPI
|
||||
"""
|
||||
|
||||
import platform
|
||||
from logging import getLogger
|
||||
from ..pyedbglib_errors import PyedbglibNotSupportedError
|
||||
|
||||
|
||||
def hid_transport(library="hidapi"):
    """
    Dispatch a transport layer for the OS in question

    The transport layer is typically used to connect to a tool and then it is passed in as a parameter when creating
    protocol objects. An example where the transport layer is used to create an instance of the housekeepingprotocol
    for communication with the nEDBG debugger::

        from pyedbglib.hidtransport.hidtransportfactory import hid_transport
        transport = hid_transport()
        connect_status = False
        try:
            connect_status = transport.connect(serial_number='', product='nedbg')
        except IOError as error:
            print("Unable to connect to USB device ({})".format(error))

        if not connect_status:
            print("Unable to connect to USB device")

        housekeeper = housekeepingprotocol.Jtagice3HousekeepingProtocol(transport)

    :param library: Transport library to use, currently only 'hidapi' is supported which will use the libusb hidapi
    :type library: string
    :returns: Instance of transport layer object
    :rtype: class:cyhidapi:CyHidApiTransport
    :raises PyedbglibNotSupportedError: if the library or the OS is not supported
    """
    logger = getLogger(__name__)
    operating_system = platform.system().lower()
    logger.debug("HID transport using library '{:s}' on OS '{:s}'".format(library, operating_system))

    # Other transports may include cmsis-dap DLL, atusbhid (dll or so) etc
    if library != 'hidapi':
        msg = "Transport library '{0}' not implemented.".format(library)
        logger.error(msg)
        raise PyedbglibNotSupportedError(msg)

    # HID API is the primary transport
    if operating_system not in ('windows', 'darwin', 'linux', 'linux2'):
        msg = "System '{0:s}' not implemented for library '{1:s}'".format(operating_system, library)
        logger.error(msg)
        raise PyedbglibNotSupportedError(msg)

    # Import here so that unsupported systems never touch the hidapi binding
    from .cyhidapi import CyHidApiTransport
    return CyHidApiTransport()
|
||||
@@ -0,0 +1,94 @@
|
||||
"""Gathering of all known Microchip CMSIS-DAP debuggers and default EP sizes"""
|
||||
|
||||
from logging import getLogger
|
||||
|
||||
# List of known useful HID/CMSIS-DAP tools
# 3G tools:
USB_TOOL_DEVICE_PRODUCT_ID_JTAGICE3 = 0x2140
USB_TOOL_DEVICE_PRODUCT_ID_ATMELICE = 0x2141
USB_TOOL_DEVICE_PRODUCT_ID_POWERDEBUGGER = 0x2144
USB_TOOL_DEVICE_PRODUCT_ID_EDBG_A = 0x2111
USB_TOOL_DEVICE_PRODUCT_ID_ZERO = 0x2157
USB_TOOL_DEVICE_PRODUCT_ID_MASS_STORAGE = 0x2169
USB_TOOL_DEVICE_PRODUCT_ID_PUBLIC_EDBG_C = 0x216A
USB_TOOL_DEVICE_PRODUCT_ID_KRAKEN = 0x2170

# 4G tools:
USB_TOOL_DEVICE_PRODUCT_ID_MEDBG = 0x2145

# 5G tools:
USB_TOOL_DEVICE_PRODUCT_ID_NEDBG_HID_MSD_DGI_CDC = 0x2175
USB_TOOL_DEVICE_PRODUCT_ID_PICKIT4_HID_CDC = 0x2177
USB_TOOL_DEVICE_PRODUCT_ID_SNAP_HID_CDC = 0x2180

# The Product String Names are used to identify the tool based on the USB
# device product strings (i.e. these names are usually just a subset of the
# actual product strings)
TOOL_SHORTNAME_TO_USB_PRODUCT_STRING = {
    'atmelice': "Atmel-ICE",
    'powerdebugger': "Power Debugger",
    'pickit4': "MPLAB PICkit 4",
    'snap': "MPLAB Snap",
    'nedbg': "nEDBG",
    'jtagice3': "JTAGICE3",
    'medbg': "mEDBG",
    'edbg': "EDBG",
}

def get_default_report_size(pid):
    """
    Retrieve default EP report size based on known PIDs

    :param pid: product ID
    :return: packet size
    """
    logger = getLogger(__name__)
    # Known PIDs mapped to their default endpoint report size
    default_sizes = {
        # 3G
        USB_TOOL_DEVICE_PRODUCT_ID_JTAGICE3: 512,
        USB_TOOL_DEVICE_PRODUCT_ID_ATMELICE: 512,
        USB_TOOL_DEVICE_PRODUCT_ID_POWERDEBUGGER: 512,
        USB_TOOL_DEVICE_PRODUCT_ID_EDBG_A: 512,
        # 4G
        USB_TOOL_DEVICE_PRODUCT_ID_MEDBG: 64,
        # 5G
        USB_TOOL_DEVICE_PRODUCT_ID_NEDBG_HID_MSD_DGI_CDC: 64,
        USB_TOOL_DEVICE_PRODUCT_ID_PICKIT4_HID_CDC: 64,
        USB_TOOL_DEVICE_PRODUCT_ID_SNAP_HID_CDC: 64,
    }

    logger.debug("Looking up report size for pid 0x{:04X}".format(pid))
    if pid in default_sizes:
        report_size = default_sizes[pid]
        logger.debug("Default report size is {:d}".format(report_size))
        return report_size
    logger.debug("PID not found! Reverting to 64b.")
    return 64

def tool_shortname_to_product_string_name(shortname):
    """
    Mapping for common short names of tools to product string name

    The intention is that this function is always run on the tool name and that the conversion
    only happens if the name is a known shortname. If the shortname is not known or if the name
    provided is already a valid Product string name then the provided shortname parameter will
    just be returned unchanged. So if the name already is a correct Product string name it is
    still safe to run this conversion function on it.

    :param shortname: shortname typically used by atbackend (powerdebugger, atmelice etc.)
    :return: String to look for in USB product strings to identify the tool
    """
    logger = getLogger(__name__)

    # No tool name given at all is valid: the conversion is a no-op
    if shortname is None:
        logger.debug("Tool shortname is None")
        return shortname

    key = shortname.lower()
    if key not in TOOL_SHORTNAME_TO_USB_PRODUCT_STRING:
        logger.debug("%s is not a known tool shortname", shortname)
        # ...but it could be a valid Product string name already so no reason to report an error
        return shortname

    return TOOL_SHORTNAME_TO_USB_PRODUCT_STRING[key]
|
||||
144
software/tools/pymcuprog/libs/pyedbglib/protocols/avrcmsisdap.py
Normal file
144
software/tools/pymcuprog/libs/pyedbglib/protocols/avrcmsisdap.py
Normal file
@@ -0,0 +1,144 @@
|
||||
"""
|
||||
CMSIS-DAP wrapper for custom commands (using vendor extensions)
|
||||
This mechanism is used to pass JTAGICE3-style commands for AVR devices
|
||||
over the CMSIS-DAP interface
|
||||
"""
|
||||
import time
|
||||
from logging import getLogger
|
||||
from ..util.binary import unpack_be16
|
||||
from ..util import print_helpers
|
||||
from .cmsisdap import CmsisDapUnit
|
||||
|
||||
|
||||
class AvrCommandError(Exception):
    """Raised when AVR command-response wrapping over CMSIS-DAP fails."""
|
||||
|
||||
|
||||
class AvrCommand(CmsisDapUnit):
    """
    Wraps AVR command and responses

    JTAGICE3-style AVR commands are chopped into fragments that fit the HID
    endpoint, sent as CMSIS-DAP vendor commands, and the responses are
    reassembled from fragments on the way back.
    """

    # Vendor Commands used to transport AVR over CMSIS-DAP
    AVR_COMMAND = 0x80
    AVR_RESPONSE = 0x81
    AVR_EVENT = 0x82
    AVR_MORE_FRAGMENTS = 0x00
    AVR_FINAL_FRAGMENT = 0x01

    # Retry delay on AVR receive frame
    AVR_RETRY_DELAY_MS = 50

    def __init__(self, transport, no_timeouts=False):
        # When no_timeouts is True, _avr_response_receive_frame polls
        # indefinitely instead of giving up after self.timeout has elapsed
        self.no_timeouts = no_timeouts
        # Receive timeout; consumed in units of AVR_RETRY_DELAY_MS, so this
        # is milliseconds
        self.timeout = 1000
        CmsisDapUnit.__init__(self, transport)
        # HID endpoint/report size; each fragment must fit within this
        self.ep_size = transport.get_report_size()
        self.logger = getLogger(__name__)
        self.logger.debug("Created AVR command on DAP wrapper")

    def poll_events(self):
        """
        Polling for events from AVRs

        :return: response from events
        """
        self.logger.debug("Polling AVR events")
        resp = self.dap_command_response(bytearray([self.AVR_EVENT]))
        return resp

    def _avr_response_receive_frame(self):
        # Poll for a response frame, retrying until data arrives or the
        # timeout budget (self.timeout ms in AVR_RETRY_DELAY_MS steps) runs out
        retries = int(self.timeout / self.AVR_RETRY_DELAY_MS)
        # Get the delay in seconds
        delay = self.AVR_RETRY_DELAY_MS / 1000
        while retries or self.no_timeouts:
            resp = self.dap_command_response(bytearray([self.AVR_RESPONSE]))
            if resp[0] != self.AVR_RESPONSE:
                # Response received is not valid. Abort.
                raise AvrCommandError("AVR response DAP command failed; invalid token: 0x{:02X}".format(resp[0]))
            if resp[1] != 0x00:
                # Non-zero second byte means a frame with payload was received
                return resp
            self.logger.debug("Resp: %s", print_helpers.bytelist_to_hex_string(resp))

            # Delay in seconds
            time.sleep(delay)
            retries -= 1
        raise AvrCommandError("AVR response timeout")

    # Chops command up into fragments
    def _fragment_command_packet(self, command_packet):
        # Each fragment carries a 4-byte header (token, fragment-info,
        # 16-bit big-endian payload length), leaving ep_size - 4 payload bytes
        packets_total = int((len(command_packet) / (self.ep_size - 4)) + 1)
        self.logger.debug("Fragmenting AVR command into {:d} chunks".format(packets_total))
        fragments = []
        for i in range(0, packets_total):
            # Fragment-info byte: 1-based fragment index in the high nibble,
            # total fragment count in the low nibble
            command_fragment = bytearray([self.AVR_COMMAND, ((i + 1) << 4) + packets_total])
            # Payload length: full chunk except for the (possibly shorter) last one
            if (len(command_packet) - (i * (self.ep_size - 4))) > (self.ep_size - 4):
                length = self.ep_size - 4
            else:
                length = len(command_packet) - (i * (self.ep_size - 4))

            command_fragment.append(int(length >> 8))
            command_fragment.append(int(length & 0xFF))

            # Copy payload bytes; zero-pad the remainder of the report
            for j in range(0, self.ep_size - 4):
                if j < length:
                    command_fragment.append(command_packet[i * (self.ep_size - 4) + j])
                else:
                    command_fragment.append(0x00)

            fragments.append(command_fragment)
        return fragments

    # Sends an AVR command and waits for response
    def avr_command_response(self, command):
        """
        Sends an AVR command and receives a response

        :param command: Command bytes to send
        :return: Response bytes received
        :raises AvrCommandError: on invalid token or fragment acknowledge
        """
        fragments = self._fragment_command_packet(command)
        self.logger.debug("Sending AVR command")
        for fragment in fragments:
            self.logger.debug("Sending AVR command 0x{:02X}".format(fragment[0]))
            resp = self.dap_command_response(fragment)
            if resp[0] != self.AVR_COMMAND:
                raise AvrCommandError("AVR command DAP command failed; invalid token: 0x{:02X}".format(resp[0]))
            # The debugger acks each fragment: final fragments must be acked
            # with AVR_FINAL_FRAGMENT, intermediate ones with AVR_MORE_FRAGMENTS
            if fragment == fragments[-1]:
                if resp[1] != self.AVR_FINAL_FRAGMENT:
                    raise AvrCommandError(
                        "AVR command DAP command failed; invalid final fragment ack: 0x{:02X}".format(resp[1]))
            else:
                if resp[1] != self.AVR_MORE_FRAGMENTS:
                    raise AvrCommandError(
                        "AVR command DAP command failed; invalid non-final fragment ack: 0x{:02X}".format(resp[1]))

        # Receive response
        # First fragment's info byte carries the remaining count in its low nibble
        fragment_info, _, response = self._avr_response_receive_fragment()
        packets_remaining = (fragment_info & 0xF) - 1
        for _ in range(0, packets_remaining):
            fragment_info, _, data = self._avr_response_receive_fragment()
            response.extend(data)
        return response

    def _avr_response_receive_fragment(self):
        # Receive one response frame and split it into (fragment_info,
        # payload size, payload bytes)
        fragment = []
        # Receive a frame
        response = self._avr_response_receive_frame()

        # Get the payload size from the header information
        size = unpack_be16(response[2:4])

        # The message header is 4 bytes, where the last two hold the size of the payload
        if len(response) < (4 + size):
            raise AvrCommandError("Response size does not match the header information.")

        # Extract data
        for i in range(0, size):
            fragment.append(response[4 + i])

        fragment_info = response[1]
        return fragment_info, size, fragment
||||
543
software/tools/pymcuprog/libs/pyedbglib/protocols/cmsisdap.py
Normal file
543
software/tools/pymcuprog/libs/pyedbglib/protocols/cmsisdap.py
Normal file
@@ -0,0 +1,543 @@
|
||||
"""
|
||||
CMSIS DAP access protocol
|
||||
|
||||
Interfaces with CMSIS-DAP standard debuggers over HID
|
||||
"""
|
||||
|
||||
import time
|
||||
from logging import getLogger
|
||||
|
||||
from .dapwrapper import DapWrapper
|
||||
from ..util import binary
|
||||
from ..pyedbglib_errors import PyedbglibError
|
||||
|
||||
|
||||
class CmsisDapUnit(DapWrapper):
    """Communicates with a DAP via standard CMSIS-DAP firmware stack over HID transport"""

    # DAP command constants
    ID_DAP_Info = 0x00
    ID_DAP_HostStatus = 0x01
    ID_DAP_Connect = 0x02
    ID_DAP_Disconnect = 0x03
    ID_DAP_TransferConfigure = 0x04
    ID_DAP_Transfer = 0x05
    ID_DAP_TransferBlock = 0x06
    ID_DAP_TransferAbort = 0x07
    ID_DAP_WriteABORT = 0x08
    ID_DAP_Delay = 0x09
    ID_DAP_ResetTarget = 0x0A
    ID_DAP_SWJ_Pins = 0x10
    ID_DAP_SWJ_Clock = 0x11
    ID_DAP_SWJ_Sequence = 0x12
    ID_DAP_SWD_Configure = 0x13
    ID_DAP_JTAG_Sequence = 0x14
    ID_DAP_JTAG_Configure = 0x15
    ID_DAP_JTAG_IDCODE = 0x16

    # DAP responses
    DAP_OK = 0x00
    DAP_ERROR = 0xff

    # DAP info fields
    DAP_ID_VENDOR = 0x01
    DAP_ID_PRODUCT = 0x02
    DAP_ID_SER_NUM = 0x03
    DAP_ID_FW_VER = 0x04
    DAP_ID_DEVICE_VENDOR = 0x05
    DAP_ID_DEVICE_NAME = 0x06
    DAP_ID_CAPABILITIES = 0xF0
    DAP_ID_PACKET_COUNT = 0xFE
    DAP_ID_PACKET_SIZE = 0xFF

    # DAP ports
    DAP_PORT_AUTODETECT = 0
    DAP_PORT_DISABLED = 0
    DAP_PORT_SWD = 1
    DAP_PORT_JTAG = 2

    def __init__(self, transport):
        self.logger = getLogger(__name__)
        DapWrapper.__init__(self, transport)

    def _check_response(self, cmd, rsp):
        """
        Checks that the response echoes the command

        :param cmd: command going in
        :param rsp: response coming out
        :raises PyedbglibError: if the response header does not echo the command ID
        """
        self.logger.debug("Checking response: cmd=0x%02X rsp=0x%02X", cmd[0], rsp[0])
        if cmd[0] != rsp[0]:
            raise PyedbglibError("Invalid response header")

    def dap_info(self):
        """Collects the dap info

        :return: dict of identification strings queried from the debugger
        """
        info = {
            'vendor': self._dap_info_field(self.DAP_ID_VENDOR),
            'product': self._dap_info_field(self.DAP_ID_PRODUCT),
            'serial': self._dap_info_field(self.DAP_ID_SER_NUM),
            'fw': self._dap_info_field(self.DAP_ID_FW_VER),
            'device_vendor': self._dap_info_field(self.DAP_ID_DEVICE_VENDOR),
            'device_name': self._dap_info_field(self.DAP_ID_DEVICE_NAME),
            'capabilities': self._dap_info_field(self.DAP_ID_CAPABILITIES)
        }
        return info

    def _dap_info_field(self, field):
        """
        Queries one field from the dap info

        :param field: which field to query
        :return: decoded string value for the field
        """
        self.logger.debug("dap_info (%d)", field)
        cmd = bytearray(2)
        cmd[0] = self.ID_DAP_Info
        cmd[1] = field
        rsp = self.dap_command_response(cmd)
        self._check_response(cmd, rsp)
        # rsp[1] is the length of the string payload that follows; strip
        # trailing NUL padding after decoding
        return (rsp[2:rsp[1] + 2].decode()).strip('\0')

    def dap_led(self, index, state):
        """
        Operates the LED

        :param index: which led
        :param state: what to do with it
        :return:
        """
        self.logger.debug("dap_led (%d, %d)", index, state)
        cmd = bytearray(3)
        cmd[0] = self.ID_DAP_HostStatus
        cmd[1] = index
        cmd[2] = state
        rsp = self.dap_command_response(cmd)
        self._check_response(cmd, rsp)

    def dap_connect(self):
        """Connects to the DAP (SWD port only)

        :raises PyedbglibError: if the debugger did not select the SWD port
        """
        self.logger.debug("dap_connect (SWD)")
        cmd = bytearray(2)
        cmd[0] = self.ID_DAP_Connect
        cmd[1] = self.DAP_PORT_SWD
        rsp = self.dap_command_response(cmd)
        self._check_response(cmd, rsp)
        # The response echoes the selected port; anything else means failure
        if rsp[1] != self.DAP_PORT_SWD:
            raise PyedbglibError("Connect failed (0x{0:02X})".format(rsp[1]))

    def dap_disconnect(self):
        """Disconnects from the DAP"""
        self.logger.debug("dap_disconnect")
        cmd = bytearray(1)
        cmd[0] = self.ID_DAP_Disconnect
        rsp = self.dap_command_response(cmd)
        self._check_response(cmd, rsp)
|
||||
|
||||
|
||||
class CmsisDapDebugger(CmsisDapUnit):
|
||||
"""ARM-specific cmsis-dap implementation"""
|
||||
|
||||
# SWJ pin IDs
|
||||
DAP_SWJ_SWCLK_TCK = (1 << 0)
|
||||
DAP_SWJ_SWDIO_TMS = (1 << 1)
|
||||
DAP_SWJ_TDI = (1 << 2)
|
||||
DAP_SWJ_TDO = (1 << 3)
|
||||
DAP_SWJ_nTRST = (1 << 5)
|
||||
DAP_SWJ_nRESET = (1 << 7)
|
||||
|
||||
# DAP transfer types
|
||||
DAP_TRANSFER_APnDP = (1 << 0)
|
||||
DAP_TRANSFER_RnW = (1 << 1)
|
||||
DAP_TRANSFER_A2 = (1 << 2)
|
||||
DAP_TRANSFER_A3 = (1 << 3)
|
||||
DAP_TRANSFER_MATCH_VALUE = (1 << 4)
|
||||
DAP_TRANSFER_MATCH_MASK = (1 << 5)
|
||||
|
||||
# DAP transfer responses
|
||||
DAP_TRANSFER_INVALID = 0
|
||||
DAP_TRANSFER_OK = (1 << 0)
|
||||
DAP_TRANSFER_WAIT = (1 << 1)
|
||||
DAP_TRANSFER_FAULT = (1 << 2)
|
||||
DAP_TRANSFER_ERROR = (1 << 3)
|
||||
DAP_TRANSFER_MISMATCH = (1 << 4)
|
||||
|
||||
# DP definitions
|
||||
DP_IDCODE = 0x00
|
||||
DP_ABORT = 0x00
|
||||
DP_CTRL_STAT = 0x04
|
||||
DP_WCR = 0x04
|
||||
DP_SELECT = 0x08
|
||||
DP_RESEND = 0x08
|
||||
DP_RDBUFF = 0x0C
|
||||
|
||||
# JTAG-specific codes
|
||||
JTAG_ABORT = 0x08
|
||||
JTAG_DPACC = 0x0A
|
||||
JTAG_APACC = 0x0B
|
||||
JTAG_IDCODE = 0x0E
|
||||
JTAG_BYPASS = 0x0F
|
||||
|
||||
# SWD-specific codes
|
||||
SWD_AP_CSW = 0x00
|
||||
SWD_AP_TAR = 0x04
|
||||
SWD_AP_DRW = 0x0C
|
||||
|
||||
# TAR size
|
||||
TAR_MAX = 0x400
|
||||
|
||||
# DAP CTRL_STAT bits
|
||||
# Source: Coresight Techref
|
||||
CSYSPWRUPACK = (1 << 31)
|
||||
CSYSPWRUPREQ = (1 << 30)
|
||||
CDBGPWRUPACK = (1 << 29)
|
||||
CDBGPWRUPREQ = (1 << 28)
|
||||
CDBGRSTACK = (1 << 27)
|
||||
CDBGRSTREQ = (1 << 26)
|
||||
WDATAERR = (1 << 7)
|
||||
READOK = (1 << 6)
|
||||
STICKYERR = (1 << 5)
|
||||
STICKYCMP = (1 << 4)
|
||||
TRNMODE = (1 << 2)
|
||||
STICKYORUN = (1 << 1)
|
||||
ORUNDETECT = (1 << 0)
|
||||
|
||||
# Useful CSW settings
|
||||
CSW_32BIT = 0x02
|
||||
CSW_16BIT = 0x01
|
||||
CSW_8BIT = 0x00
|
||||
CSW_ADDRINC_OFF = 0x00
|
||||
CSW_ADDRINC_ON = (1 << 4)
|
||||
|
||||
# Supported DAP IDs.
|
||||
CM0P_DAPID = 0x0BC11477
|
||||
|
||||
def __init__(self, transport):
|
||||
self.logger = getLogger(__name__)
|
||||
CmsisDapUnit.__init__(self, transport)
|
||||
|
||||
def dap_swj_clock(self, clock):
|
||||
"""
|
||||
Sets up the SWD clock timing
|
||||
|
||||
:param clock: clock value in Hz
|
||||
"""
|
||||
self.logger.debug("dap_swj_clk (%d)", clock)
|
||||
cmd = bytearray(1)
|
||||
cmd[0] = self.ID_DAP_SWJ_Clock
|
||||
cmd.extend(binary.pack_le32(clock))
|
||||
rsp = self.dap_command_response(cmd)
|
||||
self._check_response(cmd, rsp)
|
||||
if rsp[1] != self.DAP_OK:
|
||||
raise PyedbglibError("SWJ clock setting failed (0x{0:02X})".format(rsp[1]))
|
||||
|
||||
def dap_transfer_configure(self, idle, count, retry):
|
||||
"""
|
||||
Configures SWD transfers
|
||||
|
||||
:param idle: idle cycles
|
||||
:param count: retry count
|
||||
:param retry: match retry value
|
||||
:return:
|
||||
"""
|
||||
self.logger.debug("dap_transfer_configure (%d, %d, %d)", idle, count, retry)
|
||||
cmd = bytearray(2)
|
||||
cmd[0] = self.ID_DAP_TransferConfigure
|
||||
cmd[1] = idle
|
||||
cmd.extend(binary.pack_le16(count))
|
||||
cmd.extend(binary.pack_le16(retry))
|
||||
rsp = self.dap_command_response(cmd)
|
||||
self._check_response(cmd, rsp)
|
||||
if rsp[1] != self.DAP_OK:
|
||||
raise PyedbglibError("Transfer configure failed (0x{0:02X})".format(rsp[1]))
|
||||
|
||||
def dap_swd_configure(self, cfg):
|
||||
"""
|
||||
Configures the SWD interface
|
||||
|
||||
:param cfg: turnaround and data phase config parameters
|
||||
"""
|
||||
self.logger.debug("dap_swd_configure (%d)", cfg)
|
||||
cmd = bytearray(2)
|
||||
cmd[0] = self.ID_DAP_SWD_Configure
|
||||
cmd[1] = cfg
|
||||
rsp = self.dap_command_response(cmd)
|
||||
self._check_response(cmd, rsp)
|
||||
if rsp[1] != self.DAP_OK:
|
||||
raise PyedbglibError("SWD configure failed (0x{0:02X})".format(rsp[1]))
|
||||
|
||||
def dap_reset_target(self):
|
||||
"""Reset the target using the DAP"""
|
||||
self.logger.debug("dap_reset_target")
|
||||
cmd = bytearray(1)
|
||||
cmd[0] = self.ID_DAP_ResetTarget
|
||||
rsp = self.dap_command_response(cmd)
|
||||
self._check_response(cmd, rsp)
|
||||
if rsp[1] != self.DAP_OK:
|
||||
raise PyedbglibError("Reset target failed (0x{0:02X})".format(rsp[1]))
|
||||
|
||||
def dap_read_reg(self, reg):
|
||||
"""
|
||||
Reads a DAP AP/DP register
|
||||
|
||||
:param reg: register to read
|
||||
"""
|
||||
self.logger.debug("dap_read_reg (0x%02X)", reg)
|
||||
cmd = bytearray(8)
|
||||
cmd[0] = self.ID_DAP_Transfer
|
||||
cmd[1] = 0x00 # dap
|
||||
cmd[2] = 0x01 # 1 word
|
||||
cmd[3] = reg | self.DAP_TRANSFER_RnW
|
||||
rsp = self.dap_command_response(cmd)
|
||||
self._check_response(cmd, rsp)
|
||||
if rsp[1] != 1 or rsp[2] != self.DAP_TRANSFER_OK:
|
||||
raise PyedbglibError("Read reg failed (0x{0:02X}, {1:02X})".format(rsp[1], rsp[2]))
|
||||
value = binary.unpack_le32(rsp[3:7])
|
||||
return value
|
||||
|
||||
def dap_write_reg(self, reg, value):
|
||||
"""
|
||||
Writes a DAP AP/DP register
|
||||
|
||||
:param reg: register to write
|
||||
:param value: value to write
|
||||
"""
|
||||
self.logger.debug("dap_write_reg (0x%02X) = 0x%08X", reg, value)
|
||||
cmd = bytearray(4)
|
||||
cmd[0] = self.ID_DAP_Transfer
|
||||
cmd[1] = 0x00 # dap
|
||||
cmd[2] = 0x01 # 1 word
|
||||
cmd[3] = reg
|
||||
cmd.extend(binary.pack_le32(value))
|
||||
rsp = self.dap_command_response(cmd)
|
||||
self._check_response(cmd, rsp)
|
||||
if rsp[1] != 1 or rsp[2] != self.DAP_TRANSFER_OK:
|
||||
raise PyedbglibError("Write reg failed (0x{0:02X}, {1:02X})".format(rsp[1], rsp[2]))
|
||||
|
||||
def read_word(self, address):
|
||||
"""
|
||||
Reads a word from the device memory bus
|
||||
|
||||
:param address: address to read
|
||||
"""
|
||||
self.logger.debug("read word at 0x%08X", address)
|
||||
self.dap_write_reg(self.SWD_AP_TAR | self.DAP_TRANSFER_APnDP, address)
|
||||
return self.dap_read_reg(self.SWD_AP_DRW | self.DAP_TRANSFER_APnDP)
|
||||
|
||||
def write_word(self, address, data):
|
||||
"""
|
||||
Writes a word to the device memory bus
|
||||
|
||||
:param address: address to write
|
||||
:param data: data to write
|
||||
"""
|
||||
self.logger.debug("write word at 0x%08X = 0x%08X", address, data)
|
||||
self.dap_write_reg(self.SWD_AP_TAR | self.DAP_TRANSFER_APnDP, address)
|
||||
self.dap_write_reg(self.SWD_AP_DRW | self.DAP_TRANSFER_APnDP, data)
|
||||
|
||||
@staticmethod
|
||||
def multiple_of_four(x):
|
||||
""" 4 byte boundary """
|
||||
return x & ~0x03
|
||||
|
||||
def read_block(self, address, numbytes):
    """
    Reads a block from the device memory bus

    Transfers are chunked twice: by the HID report payload capacity, and by the
    TAR auto-increment wrap boundary (TAR_MAX), which must not be crossed
    within a single DAP_TransferBlock.

    :param address: byte address to read from (expected word-aligned)
    :param numbytes: number of bytes to read
    :return: bytearray of data read
    """
    self.logger.debug("Block read of %d bytes at address 0x%08X", numbytes, address)
    # Collect results here
    result = bytearray()
    # Chunk by report size minus the 5-byte DAP_TransferBlock header, rounded down to whole words
    max_payload_size_bytes = self.multiple_of_four(self.transport.get_report_size() - 5)
    self.logger.debug("Max payload size of %d bytes", max_payload_size_bytes)
    while numbytes:
        # Calculate read size, starting from the largest possible chunk
        read_size_bytes = max_payload_size_bytes

        # Last chunk?
        if read_size_bytes > numbytes:
            read_size_bytes = numbytes

        # Too large for TAR?  Do not cross the auto-increment wrap boundary.
        # (address & ~(TAR_MAX - 1)) is the base of the current TAR page.
        # BUGFIX: was 'address & (1 - self.TAR_MAX)', which keeps bit 0 of the
        # address in the page base and miscomputes the chunk for unaligned
        # addresses; equivalent for word-aligned addresses.
        tar_max_chunk = self.TAR_MAX - (address - (address & ~(self.TAR_MAX - 1)))
        if read_size_bytes > tar_max_chunk:
            read_size_bytes = tar_max_chunk

        # Log
        self.logger.debug("Read %d bytes from TAR address 0x%08X", read_size_bytes, address)

        # Set TAR
        self.dap_write_reg(self.SWD_AP_TAR | self.DAP_TRANSFER_APnDP, address)

        # Read chunk: [ID, DAP index, word count (le16), register request]
        cmd = bytearray(2)
        cmd[0] = self.ID_DAP_TransferBlock
        cmd[1] = 0x00
        cmd.extend(binary.pack_le16(read_size_bytes // 4))
        cmd.extend([self.SWD_AP_DRW | self.DAP_TRANSFER_RnW | self.DAP_TRANSFER_APnDP])

        rsp = self.dap_command_response(cmd)
        self._check_response(cmd, rsp)

        # Check outcome (rsp[3] is the transfer status byte)
        if rsp[3] != self.DAP_TRANSFER_OK:
            raise PyedbglibError("Transfer failed (0x{0:02X}) address 0x{1:08X}".format(rsp[3], address))

        # Extract payload: rsp[1:3] is the word count actually transferred
        num_words_read = binary.unpack_le16(rsp[1:3])

        # Check that the debugger returned exactly what was requested
        if num_words_read * 4 != read_size_bytes:
            raise PyedbglibError(
                "Unexpected number of bytes returned from block read ({0:d} != {1:d})".format(num_words_read * 4,
                                                                                              read_size_bytes))

        # Extend results
        result.extend(rsp[4:4 + read_size_bytes])
        numbytes -= read_size_bytes
        address += read_size_bytes

    return result
|
||||
|
||||
def write_block(self, address, data):
    """
    Writes a block to the device memory bus

    Transfers are chunked twice: by the HID report payload capacity, and by the
    TAR auto-increment wrap boundary (TAR_MAX), which must not be crossed
    within a single DAP_TransferBlock.

    :param address: byte address to write to (expected word-aligned)
    :param data: data to write
    """
    self.logger.debug("Block write of %d bytes at address 0x%08X", len(data), address)

    # Chunk by report size minus the 5-byte DAP_TransferBlock header, rounded down to whole words
    max_payload_size_bytes = self.multiple_of_four(self.transport.get_report_size() - 5)
    while data:
        # Calculate write size, starting from the largest possible chunk
        write_size_bytes = max_payload_size_bytes
        if write_size_bytes > len(data):
            write_size_bytes = len(data)

        # Too large for TAR?  Do not cross the auto-increment wrap boundary.
        # BUGFIX: was 'address & (1 - self.TAR_MAX)', which keeps bit 0 of the
        # address in the page base and miscomputes the chunk for unaligned
        # addresses; equivalent for word-aligned addresses.
        tar_max_chunk = self.TAR_MAX - (address - (address & ~(self.TAR_MAX - 1)))
        if write_size_bytes > tar_max_chunk:
            write_size_bytes = tar_max_chunk

        # Set TAR
        self.dap_write_reg(self.SWD_AP_TAR | self.DAP_TRANSFER_APnDP, address)

        # Write chunk: [ID, DAP index, word count (le16), register request, payload...]
        cmd = bytearray(2)
        cmd[0] = self.ID_DAP_TransferBlock
        cmd[1] = 0x00
        cmd.extend(binary.pack_le16(write_size_bytes // 4))
        cmd.extend([self.SWD_AP_DRW | self.DAP_TRANSFER_APnDP])
        cmd.extend(data[0:write_size_bytes])
        rsp = self.dap_command_response(cmd)
        self._check_response(cmd, rsp)

        # Shrink data buffer and advance the address
        data = data[write_size_bytes:]
        address += write_size_bytes
|
||||
|
||||
def _send_flush_tms(self):
    """Clock out 56 cycles with TMS (SWDIO) held high to force the SWJ line state"""
    flush_byte_count = 7
    # SWJ_Sequence: [ID, bit count, data bytes...] — 7 bytes of 0xFF = 56 '1' bits
    cmd = bytearray([self.ID_DAP_SWJ_Sequence, flush_byte_count * 8])
    cmd += bytearray([0xFF] * flush_byte_count)
    rsp = self.dap_command_response(cmd)
    self._check_response(cmd, rsp)
    if rsp[1] != self.DAP_OK:
        raise PyedbglibError("SWJ sequence failed (0x{0:02X})".format(rsp[1]))
|
||||
|
||||
def init_swj(self):
    """
    Magic sequence to execute on pins to enable SWD in case of JTAG-default parts

    Performs the SWJ-DP JTAG-to-SWD switch: line reset, 16-bit select code,
    another line reset, then verifies the DP IDCODE.  The exact bit sequence
    and ordering is mandated by the ARM Debug Interface documentation.
    """
    self.logger.debug("SWJ init sequence")
    # According to ARM manuals:
    # Send at least 50 cycles with TMS=1
    self._send_flush_tms()

    # Send 16-bit switching code (0xE79E is the JTAG-to-SWD select value,
    # transmitted LSB first as a little-endian 16-bit word)
    cmd = bytearray(2)
    cmd[0] = self.ID_DAP_SWJ_Sequence
    cmd[1] = 16
    cmd.extend(binary.pack_le16(0xE79E))
    rsp = self.dap_command_response(cmd)
    self._check_response(cmd, rsp)
    if rsp[1] != self.DAP_OK:
        raise PyedbglibError("SWJ sequence failed (0x{0:02X})".format(rsp[1]))

    # Flush TMS again (second line reset, now interpreted by the SW-DP)
    self._send_flush_tms()

    # Set data low again (single idle bit so the line does not stay high)
    cmd = bytearray(3)
    cmd[0] = self.ID_DAP_SWJ_Sequence
    cmd[1] = 1
    cmd[2] = 0x00
    rsp = self.dap_command_response(cmd)
    self._check_response(cmd, rsp)
    if rsp[1] != self.DAP_OK:
        raise PyedbglibError("SWJ sequence failed (0x{0:02X})".format(rsp[1]))

    # Now read the ID to check that it has switched
    dap_id = self.dap_read_idcode()
    if dap_id != self.CM0P_DAPID:
        raise PyedbglibError("Invalid SWD DAP ID code! Only M0+ is currently supported.")
|
||||
|
||||
def dap_read_idcode(self):
    """
    Reads the IDCODE from the SWD DP

    :return: IDCODE register value
    """
    self.logger.debug("reading swd idcode")
    return self.dap_read_reg(self.DP_IDCODE)
|
||||
|
||||
def dap_target_init(self):
    """
    Configures the DAP for use

    Write order matters: stickies are cleared first, then the AP bank is
    selected, debug power is requested, and finally the AP CSW is configured.
    NOTE(review): the power-up ACK bits in CTRL/STAT are not polled here —
    presumably the debugger/target is fast enough; confirm if flaky.
    """
    self.logger.debug("dap_target_init")
    # Clear all stickies
    self.dap_write_reg(self.DP_ABORT, self.STICKYERR | self.STICKYCMP | self.STICKYORUN)
    # Select to 0
    self.dap_write_reg(self.DP_SELECT, 0)
    # Request debug power
    self.dap_write_reg(self.DP_CTRL_STAT, self.CDBGPWRUPREQ | self.CSYSPWRUPREQ)
    # Most useful default of 32-bit word access with auto-increment enabled
    self.dap_write_reg(self.SWD_AP_CSW | self.DAP_TRANSFER_APnDP, self.CSW_ADDRINC_ON | self.CSW_32BIT)
|
||||
|
||||
|
||||
class CmsisDapSamDebugger(CmsisDapDebugger):
    """SAM specific CMSIS-DAP debugger"""

    def dap_reset_ext(self, extend=False):
        """
        Reset the target using the hardware

        Some SAM devices (for example SAMDx and SAMLx) have an additional 'reset extension' capability which is not part
        of the CMSIS-DAP standard. It is used to prevent the device from running after reset and then overriding its
        SWD IO. The procedure is simply to hold SW_CLK low while releasing /RESET. This is done here using SWJ pins
        function IF the extend argument is set.

        :param extend: boolean flag to extend reset
        """
        self.logger.debug("dap_reset_ext")
        # Pin select mask: always drive /RESET, and also SWCLK when extending
        pin_select = self.DAP_SWJ_nRESET
        if extend:
            pin_select |= self.DAP_SWJ_SWCLK_TCK

        # First SWJ_Pins transaction: drive the selected pins low.
        # Bytes 3..6 (the wait-time field) are left at zero.
        cmd = bytearray(7)
        cmd[0] = self.ID_DAP_SWJ_Pins
        cmd[1] = 0  # Reset LOW, TCK LOW
        cmd[2] = pin_select
        rsp = self.dap_command_response(cmd)
        self._check_response(cmd, rsp)

        # Second transaction: release /RESET high while SWCLK (if selected)
        # stays driven low
        cmd[1] = self.DAP_SWJ_nRESET  # Reset high, TCK still low
        rsp = self.dap_command_response(cmd)
        self._check_response(cmd, rsp)

        # Allow Reset to be pulled high
        time.sleep(0.1)
|
||||
@@ -0,0 +1,38 @@
|
||||
"""Wrapper for any protocol over CMSIS-DAP"""
|
||||
|
||||
from logging import getLogger
|
||||
|
||||
|
||||
class DapWrapper(object):
    """Base class for any CMSIS-DAP protocol wrapper.

    Thin delegation layer over a HID transport object.
    """

    def __init__(self, transport):
        # Keep a handle to the transport and a module-level logger
        self.transport = transport
        self.logger = getLogger(__name__)
        self.logger.debug("Created DapWrapper")

    def dap_command_response(self, packet):
        """
        Send a command, receive a response

        :param packet: bytes to send
        :return: response received
        """
        return self.transport.hid_transfer(packet)

    def dap_command_write(self, packet):
        """
        Send a packet

        :param packet: packed data to sent
        :return: bytes sent
        """
        return self.transport.hid_write(packet)

    def dap_command_read(self):
        """
        Receive data

        :return: data received
        """
        return self.transport.hid_read()
|
||||
@@ -0,0 +1,210 @@
|
||||
"""Implements EDBG Protocol, a sub-protocol in the JTAGICE3 family of protocols."""
|
||||
|
||||
from logging import getLogger
|
||||
from ..util.binary import unpack_be16
|
||||
from .jtagice3protocol import Jtagice3Protocol
|
||||
|
||||
|
||||
class EdbgProtocol(Jtagice3Protocol):
    """Implements EDBG protocol functionality on the JTAGICE3 protocol family"""

    # Generic EDBG handler commands
    CMD_EDBG_QUERY = 0x00  # Capability discovery
    CMD_EDBG_SET = 0x01  # Set parameters
    CMD_EDBG_GET = 0x02  # Get parameters

    # ID-chip (SHA204-based extension identification) commands
    CMD_EDBG_PROGRAM_ID_CHIP = 0x50  # Programs an ID chip
    CMD_EDBG_REFRESH_ID_CHIP = 0x51  # Triggers ID chip refresh
    CMD_EDBG_READ_ID_CHIP = 0x7E  # Retrieve ID chip info

    AVR_GET_CONFIG = 0x83  # CMSIS vendor 3 get config command

    # Response codes from the EDBG handler
    RSP_EDBG_OK = 0x80  # All OK
    RSP_EDBG_LIST = 0x81  # List of items returned
    RSP_EDBG_DATA = 0x84  # Data returned
    RSP_EDBG_FAILED = 0xA0  # Command failed to execute

    # Query context: list of supported commands
    EDBG_QUERY_COMMANDS = 0x00

    # Get/set contexts and parameter IDs
    EDBG_CTXT_CONTROL = 0x00  # Control
    EDBG_CONTROL_LED_USAGE = 0x00
    EDBG_CONTROL_EXT_PROG = 0x01
    EDBG_CONTROL_TARGET_POWER = 0x10

    EDBG_CONFIG_KIT_DATA = 0x20  # Read the kit info flash page

    """Mapping EDBG error codes to more human friendly strings"""
    EDBG_ERRORS = {0: 'SUCCESS'}

    """Mapping SHA204 response codes to more human friendly strings"""
    RESPONSE_CODE = {0x00: 'SHA204_SUCCESS',
                     0xD2: 'SHA204_PARSE_ERROR',
                     0xD3: 'SHA204_CMD_FAIL',
                     0xD4: 'SHA204_STATUS_CRC',
                     0xE0: 'SHA204_FUNC_FAIL',
                     0xE2: 'SHA204_BAD_PARAM',
                     0xE4: 'SHA204_INVALID_SIZE',
                     0xE5: 'SHA204_BAD_CRC',
                     0xE6: 'SHA204_RX_FAIL',
                     0xE7: 'SHA204_RX_NO_RESPONSE',
                     0xE8: 'SHA204_RESYNC_WITH_WAKEUP',
                     0xF0: 'SHA204_COMM_FAIL',
                     0xF1: 'SHA204_TIMEOUT',
                     0xFA: 'ID_DATA_LOCKED',
                     0xFB: 'ID_CONFIG_LOCKED',
                     0xFC: 'ID_INVALID_SLOT',
                     0xFD: 'ID_DATA_PARSING_ERROR',
                     0xFE: 'ID_DATA_NOT_EQUAL'}

    def __init__(self, transport):
        # Attach to the EDBG handler of the JTAGICE3 protocol family
        self.logger = getLogger(__name__)
        super(EdbgProtocol, self).__init__(
            transport, Jtagice3Protocol.HANDLER_EDBG)

    def check_command_exists(self, command):
        """
        Check if command is supported

        Runs a query to the tool to get a list of supported commands, then looks for
        the input command in the list. If not supported, it raises NotImplementedError.

        :param command: The command to test.
        :return: None
        :raises NotImplementedError: if the tool does not list the command as supported
        """
        commands_supported = self.query(self.EDBG_QUERY_COMMANDS)
        if command not in commands_supported:
            raise NotImplementedError("Invalid command: 0x{:02X}".format(command))

    def error_as_string(self, code):
        """
        Get the response error as a string (error code translated to descriptive string)

        :param code: error code
        :return: error code as descriptive string
        """
        try:
            return self.EDBG_ERRORS[code]
        except KeyError:
            return "Unknown error!"

    def response_as_string(self, code):
        """
        Get the response code as a string (response code translated to descriptive string)

        :param code: response code
        :return: error code as descriptive string
        """
        try:
            return self.RESPONSE_CODE[code]
        except KeyError:
            return "Unknown response!"

    def program_id_chip(self, id_number, data):
        """
        Program the connected ID device located at the id_number with data.

        :param id_number: Extension header ID number (Range 1 - 16)
        :param data: A 64-byte data array to be programmed
        :return: Response status from the programming
        """
        self.logger.info("Programming ID chip...")
        # try/except/else: the except branch services old (non-compliant,
        # version-0) EDBG firmware; the else branch is the compliant version-1 path.
        try:
            self.check_command_exists(self.CMD_EDBG_PROGRAM_ID_CHIP)
        except NotImplementedError as err:
            self.logger.warning("Non-compliant command: %s", err)
            # Old EDBG implementations contained a non-compliant version of this command
            # Version 0 command (note: id_number is 0-based on this path)
            packet = bytearray([self.CMD_EDBG_PROGRAM_ID_CHIP, self.CMD_VERSION0, id_number - 1] + data)
            resp = self.jtagice3_command_response_raw(packet)
            self.logger.debug("Program ID response: %s", self.response_as_string(resp[3]))
            return resp[3]
        else:
            # Version 1 command
            packet = bytearray([self.CMD_EDBG_PROGRAM_ID_CHIP, self.CMD_VERSION1, id_number] + data)
            status = self.check_response(self.jtagice3_command_response(packet))
            self.logger.debug("Program ID response: %s", self.response_as_string(status[0]))
            return status[0]

    def refresh_id_chip(self):
        """
        Forces a refresh of the list of connected ID devices.

        :return: None
        :raises IOError: if the version-0 command does not report RSP_EDBG_OK
        """
        self.logger.info("Refreshing ID chip...")
        # try/except/else: except = legacy version-0 firmware, else = version-1
        try:
            self.check_command_exists(self.CMD_EDBG_REFRESH_ID_CHIP)
        except NotImplementedError as err:
            self.logger.warning("Non-compliant command: %s", err)
            # Old EDBG implementations contained a non-compliant version of this command
            # Version 0 command
            packet = bytearray([self.CMD_EDBG_REFRESH_ID_CHIP, self.CMD_VERSION0])
            resp = self.jtagice3_command_response_raw(packet)
            if not resp[3] == self.RSP_EDBG_OK:
                raise IOError("Invalid response from CMD_EDBG_REFRESH_ID_CHIP")
        else:
            # Version 1 command
            packet = bytearray([self.CMD_EDBG_REFRESH_ID_CHIP, self.CMD_VERSION1])
            self.check_response(self.jtagice3_command_response(packet))

    def read_id_chip(self, id_number):
        """
        Reads the ID information from the ID chip connected at id_number

        :param id_number: Extension header ID number (Range 1 - 16)
        :return: A 64-byte data array, or False on failure (version-0 path only)
        """
        self.logger.info("Reading ID chip...")
        # try/except/else: except = legacy version-0 firmware, else = version-1
        try:
            self.check_command_exists(self.CMD_EDBG_READ_ID_CHIP)
        except NotImplementedError as err:
            self.logger.warning("Non-compliant command: %s", err)
            # Old EDBG implementations contained a non-compliant version of this command
            # Version 0 command (note: id_number is 0-based on this path)
            packet = bytearray([self.CMD_EDBG_READ_ID_CHIP, self.CMD_VERSION0, id_number - 1])
            resp = self.jtagice3_command_response_raw(packet)
            if resp[4] == self.RSP_EDBG_DATA:
                return resp[6:]
            return False
        else:
            # Version 1 command
            packet = bytearray([self.CMD_EDBG_READ_ID_CHIP, self.CMD_VERSION1, id_number])
            data = self.check_response(self.jtagice3_command_response(packet))
            return data

    def read_edbg_extra_info(self):
        """
        Reads the kit info flash page, containing board specific data

        :return: A data array containing the kit info, or False on failure
        """
        self.logger.info("Reading kit info...")

        # The second parameter tells the debugger it is the only command
        # The last parameter tells what to read. If zero a whole page is read, and
        # if non-zero 32-bytes is fetched from offset 32 * parameter. The parameter
        # cannot be greater than 8
        response = self.dap_command_response(bytearray([self.AVR_GET_CONFIG, 0x01,
                                                        self.EDBG_CONFIG_KIT_DATA, 0x0]))

        # Remove unused data (cap at 256-byte page plus 6-byte header)
        if len(response) >= 256 + 6:
            self.logger.info("Response size is truncated")
            response = response[:256 + 6]

        # Byte 0 will echo the current command
        # Byte 1 show the command status
        if response[0] == self.AVR_GET_CONFIG:

            # Check the status code
            if response[1] == 0:
                # Bytes [3..2] contain the received size
                size = unpack_be16(response[2:4])
                # NOTE(review): slice is response[6:size]; if 'size' counts payload
                # only (excluding the 6-byte header) this drops the tail — confirm
                # against the firmware's AVR_GET_CONFIG documentation
                return response[6:size]

            self.logger.warning("Command failed with error: %i", response[1])

        self.logger.warning("Command was not echoed back")
        return False
|
||||
@@ -0,0 +1,141 @@
|
||||
"""Implements Housekeeping Protocol, a sub-protocol in the JTAGICE3 family of protocols."""
|
||||
|
||||
from logging import getLogger
|
||||
|
||||
from .jtagice3protocol import Jtagice3Protocol
|
||||
from .jtagice3protocol import Jtagice3ResponseError
|
||||
from ..util import binary
|
||||
|
||||
|
||||
class Jtagice3HousekeepingProtocol(Jtagice3Protocol):
    """Implements housekeeping functionality on the JTAGICE3 protocol family"""

    # Query contexts
    HOUSEKEEPING_QUERY_COMMANDS = 0x00  # List supported commands
    HOUSEKEEPING_QUERY_ANALOG_CHANNELS = 0x01  # List which analog channels are present
    HOUSEKEEPING_QUERY_SPECIAL_ABILITIES = 0x02  # List special abilities

    # Protocol commands
    CMD_HOUSEKEEPING_START_SESSION = 0x10  # Sign on
    CMD_HOUSEKEEPING_END_SESSION = 0x11  # Sign off
    CMD_HOUSEKEEPING_FW_UPGRADE = 0x50  # Enter upgrade mode

    # Get/Set contexts
    HOUSEKEEPING_CONTEXT_CONFIG = 0x00  # Configuration parameters
    HOUSEKEEPING_CONTEXT_ANALOG = 0x01  # Analog parameters
    HOUSEKEEPING_CONTEXT_STATEMENT = 0x02  # Statement memory (deprecated)
    HOUSEKEEPING_CONTEXT_USB = 0x03  # USB parameters
    HOUSEKEEPING_CONTEXT_STATISTICS = 0x80  # Statistics
    HOUSEKEEPING_CONTEXT_DIAGNOSTICS = 0x81  # Diagnostics

    # Config context
    HOUSEKEEPING_CONFIG_HWREV = 0x00  # Hardware version
    HOUSEKEEPING_CONFIG_FWREV_MAJ = 0x01  # Major firmware version
    HOUSEKEEPING_CONFIG_FWREV_MIN = 0x02  # Minor firmware version
    HOUSEKEEPING_CONFIG_BUILD = 0x03  # Build number (2 bytes)
    HOUSEKEEPING_CONFIG_CHIP = 0x05  # Chipset ID
    HOUSEKEEPING_CONFIG_BLDR_MAJ = 0x06  # Bootloader major version
    HOUSEKEEPING_CONFIG_BLDR_MIN = 0x07  # Bootloader minor version
    HOUSEKEEPING_CONFIG_DEBUG_BUILD = 0x08  # Debug build flag
    HOUSEKEEPING_CONFIG_FIRMWARE_IMAGE = 0x09  # Firmware Image enumerator

    # USB context
    HOUSEKEEPING_USB_MAX_READ = 0x00  # Maximum USB read block size
    HOUSEKEEPING_USB_MAX_WRITE = 0x01  # Maximum USB write block size
    HOUSEKEEPING_USB_EP_SIZE_HID = 0x10  # Current HID endpoint size
    HOUSEKEEPING_USB_EP_SIZE_CDC = 0x11  # Current CDC endpoint size

    # Diagnostics
    HOUSEKEEPING_DIAGNOSTICS_RESET_CAUSE = 0x00  # Last reset cause
    HOUSEKEEPING_DIAGNOSTICS_BOD_CTRL = 0x01  # BOD register
    HOUSEKEEPING_HOST_ID = 0x02  # Debugger host device identifier
    HOUSEKEEPING_HOST_REV = 0x03  # Debugger host device revision
    HOUSEKEEPING_MODULE_VER_JTAG = 0x04  # Debugger host JTAG master version
    HOUSEKEEPING_MODULE_VER_AW = 0x05  # Debugger host aWire master version
    HOUSEKEEPING_DIAGNOSTICS_CPU_CLK = 0x06  # Debugger host CPU clock speed

    # Analog
    HOUSEKEEPING_ANALOG_VTREF = 0x00  # Target voltage reference value
    HOUSEKEEPING_ANALOG_VTG_BUF = 0x01  # Bufferred target voltage reference
    HOUSEKEEPING_ANALOG_VUSB = 0x02  # USB voltage
    HOUSEKEEPING_TSUP_VOLTAGE = 0x20  # Target supply voltage setpoint

    # Special Abilities
    HOUSEKEEPING_ABILITY_RESET_EXTENSION = 0x00  # This tool is capable of reset extension
    HOUSEKEEPING_ABILITY_HV_UPDI_ENABLE = 0x10  # This tool is capable of UPDI high-voltage activation

    def __init__(self, transport):
        # Attach to the housekeeping handler of the JTAGICE3 protocol family
        super(Jtagice3HousekeepingProtocol, self).__init__(transport, Jtagice3Protocol.HANDLER_HOUSEKEEPING)
        self.logger = getLogger(__name__)
        self.logger.debug("Created AVR housekeeping protocol")

    def list_supported_commands(self):
        """
        Uses the query interface to list all supported commands

        :return: list of command IDs supported by this handler
        """
        self.logger.debug("Querying commands supported by this instance of housekeeping handler")
        commands = self.query(self.HOUSEKEEPING_QUERY_COMMANDS)
        return commands

    # Direct protocol commands
    def start_session(self):
        """Starts a session with the debugger (sign-on)"""
        self.logger.debug("Housekeeping::start_session")
        response = self.jtagice3_command_response(bytearray([self.CMD_HOUSEKEEPING_START_SESSION, self.CMD_VERSION0]))
        self.check_response(response)

    def end_session(self, reset_tool=False):
        """
        Ends a session with the debugger (sign-off)

        :param reset_tool: resets the hardware
        :return:
        """
        self.logger.debug("Housekeeping::end_session")
        response = self.jtagice3_command_response(
            bytearray([self.CMD_HOUSEKEEPING_END_SESSION, self.CMD_VERSION0, 1 if reset_tool else 0]))
        self.check_response(response)

    def enter_upgrade_mode(self, key=0x31727C10):
        """
        Puts the debugger into firmware upgrade mode

        :param key: upgrade key
        :return:
        """
        self.logger.debug("Housekeeping::enter_upgrade_mode")
        # The tool may reboot before the USB response is read back; that shows
        # up as an IOError on the transport and is expected here
        try:
            response = self.jtagice3_command_response(
                bytearray([self.CMD_HOUSEKEEPING_FW_UPGRADE, self.CMD_VERSION0]) + binary.pack_be32(key))
        except IOError:
            self.logger.debug("IOError on enter upgrade mode. Device rebooted before response was read.")
        else:
            self.check_response(response)

    def read_version_info(self):
        """
        Reads version info from the debugger

        :return: dict of hardware/firmware/bootloader/host version fields
        """
        self.logger.debug("Housekeeping::reading version info")

        # Results in dict form
        versions = {
            # HW version
            'hardware': self.get_byte(self.HOUSEKEEPING_CONTEXT_CONFIG, self.HOUSEKEEPING_CONFIG_HWREV),
            # FW version
            'firmware_major': self.get_byte(self.HOUSEKEEPING_CONTEXT_CONFIG, self.HOUSEKEEPING_CONFIG_FWREV_MAJ),
            'firmware_minor': self.get_byte(self.HOUSEKEEPING_CONTEXT_CONFIG, self.HOUSEKEEPING_CONFIG_FWREV_MIN),
            'build': self.get_le16(self.HOUSEKEEPING_CONTEXT_CONFIG, self.HOUSEKEEPING_CONFIG_BUILD),
            # BLDR
            'bootloader': self.get_le16(self.HOUSEKEEPING_CONTEXT_CONFIG, self.HOUSEKEEPING_CONFIG_BLDR_MAJ),
            # Host info
            'chip': self.get_byte(self.HOUSEKEEPING_CONTEXT_CONFIG, self.HOUSEKEEPING_CONFIG_CHIP),
            'host_id': self.get_le32(self.HOUSEKEEPING_CONTEXT_DIAGNOSTICS, self.HOUSEKEEPING_HOST_ID),
            'host_rev': self.get_byte(self.HOUSEKEEPING_CONTEXT_DIAGNOSTICS, self.HOUSEKEEPING_HOST_REV),
            # Misc
            'debug': self.get_byte(self.HOUSEKEEPING_CONTEXT_CONFIG, self.HOUSEKEEPING_CONFIG_DEBUG_BUILD)
        }

        # Firmware Image Requirement Enumerator is only supported on some tools
        try:
            versions['fire'] = self.get_byte(self.HOUSEKEEPING_CONTEXT_CONFIG, self.HOUSEKEEPING_CONFIG_FIRMWARE_IMAGE)
        except Jtagice3ResponseError:
            versions['fire'] = None

        return versions
|
||||
@@ -0,0 +1,337 @@
|
||||
"""JTAGICE3 protocol mappings"""
|
||||
|
||||
from logging import getLogger
|
||||
|
||||
from .avrcmsisdap import AvrCommand
|
||||
from ..util import binary
|
||||
from ..util import print_helpers
|
||||
from ..pyedbglib_errors import PyedbglibError
|
||||
|
||||
|
||||
class Jtagice3Command(AvrCommand):
    """
    Sends a "JTAGICE3" command frame, and received a response

    JTAGICE3 protocol header is formatted:
        JTAGICE3_TOKEN 0x0E
        PROTOCOL_VERSION 0
        SEQUENCE_NUMBER_L
        SEQUENCE_NUMBER_H
        HANDLER_ID
        PAYLOAD

    Response format is:
        JTAGICE3_TOKEN 0x0E
        SEQUENCE_NUMBER_L echo
        SEQUENCE_NUMBER_H echo
        HANDLER_ID
        PAYLOAD
    """

    # JTAGICE3 protocol token
    JTAGICE3_TOKEN = 0x0E
    JTAGICE3_PROTOCOL_VERSION = 0x00

    # Handlers within JTAGICE3 protocol
    HANDLER_DISCOVERY = 0x00
    HANDLER_HOUSEKEEPING = 0x01
    HANDLER_SPI = 0x11
    HANDLER_AVR8_GENERIC = 0x12
    HANDLER_AVR32_GENERIC = 0x13
    HANDLER_TPI = 0x14
    HANDLER_EDBG = 0x20
    HANDLER_COPROCESSOR = 0x21
    HANDLER_POWER = 0x22
    HANDLER_SELFTEST = 0x81

    def __init__(self, transport, handler):
        # Bind this command object to one specific handler ID
        super(Jtagice3Command, self).__init__(transport)
        self.logger = getLogger(__name__)
        self.logger.debug("Created JTAGICE3 command")
        self.handler = handler
        self.sequence_id = 0

    def validate_response(self, response):
        """
        Validates the response form the debugger

        Checks length, token, echoed sequence number and handler ID; raises
        PyedbglibError on any mismatch.

        :param response: raw response bytes
        """
        self.logger.debug("Checking response (%s)", print_helpers.bytelist_to_hex_string(response))

        # Check length first
        if len(response) < 5:
            raise PyedbglibError("Invalid response length ({:d}).".format(len(response)))

        # Check token
        if response[0] != self.JTAGICE3_TOKEN:
            raise PyedbglibError("Invalid token (0x{:02X}) in response.".format(response[0]))

        # Check sequence (little-endian echo of the sent sequence number)
        sequence = response[1] + (response[2] << 8)
        if self.sequence_id != sequence:
            raise PyedbglibError(
                "Invalid sequence in response (0x{:04X} vs 0x{:04X}).".format(self.sequence_id, sequence))

        # Check handler
        if response[3] != self.handler:
            raise PyedbglibError("Invalid handler (0x{:02X}) in response.".format(response[3]))

    def jtagice3_command_response_raw(self, command):
        """
        Sends a JTAGICE3 command and receives the corresponding response

        The response is returned unvalidated and with the framing header intact.

        :param command: payload bytes
        :return: raw response including framing header
        """
        # Header: token, protocol version, sequence number (le16), handler ID
        header = bytearray([self.JTAGICE3_TOKEN, self.JTAGICE3_PROTOCOL_VERSION, self.sequence_id & 0xFF,
                            (self.sequence_id >> 8) & 0xFF, self.handler])

        # Send command, receive response
        packet = header + bytearray(command)
        response = self.avr_command_response(packet)
        return response

    def jtagice3_command_response(self, command):
        """
        Sends a JTAGICE3 command and receives the corresponding response, and validates it

        :param command: payload bytes
        :return: response payload with the 4-byte framing header stripped
        """
        response = self.jtagice3_command_response_raw(command)

        # Increment sequence number; wraps back to 1 (0 is never reused after start)
        self.sequence_id += 1
        if self.sequence_id > 0xFFFE:
            self.sequence_id = 1

        # Peel and return
        return response[4:]
|
||||
|
||||
|
||||
class Jtagice3ResponseError(Exception):
    """Exception raised when a JTAGICE3 response carries a failure code"""

    def __init__(self, msg, code):
        """
        :param msg: human readable error message
        :param code: raw JTAGICE3 failure code
        """
        super(Jtagice3ResponseError, self).__init__(msg)
        self.code = code
|
||||
|
||||
|
||||
class Jtagice3Protocol(Jtagice3Command):
    """
    Base class for all protocols in the JTAGICE3 family.

    All sub-protocols support query, get and set commands.
    """

    # Command versioning
    CMD_VERSION0 = 0
    CMD_VERSION1 = 1

    # All handler share these functions:
    CMD_QUERY = 0x00
    CMD_SET = 0x01
    CMD_GET = 0x02

    # And these base responses
    PROTOCOL_OK = 0x80
    PROTOCOL_LIST = 0x81
    PROTOCOL_DATA = 0x84
    PROTOCOL_FAILED = 0xA0
    # PROTOCOL_FAILED_WITH_DATA = 0xA1

    # Failure codes
    FAILURE_OK = 0

    # CMD_SET and CMD_GET failure codes
    SETGET_FAILURE_OK = 0x00
    SETGET_FAILURE_NOT_IMPLEMENTED = 0x10
    SETGET_FAILURE_NOT_SUPPORTED = 0x11
    SETGET_FAILURE_INVALID_CLOCK_SPEED = 0x20
    SETGET_FAILURE_ILLEGAL_STATE = 0x21
    SETGET_FAILURE_JTAGM_INIT_ERROR = 0x22
    SETGET_FAILURE_INVALID_VALUE = 0x23
    SETGET_FAILURE_HANDLER_ERROR = 0x30

    """Mapping JTAGICE3 error codes to more human friendly strings"""
    JTAGICE3_ERRORS = {0: 'SUCCESS'}
|
||||
|
||||
def __init__(self, transport, handler, supports_trailing_status=True):
    """
    :param transport: transport layer (HID interface)
    :param handler: JTAGICE3 handler ID this protocol instance talks to
    :param supports_trailing_status: whether DATA responses from this handler
        carry a trailing status byte (not all handlers do)
    """
    super(Jtagice3Protocol, self).__init__(transport, handler)
    self.logger = getLogger(__name__)
    self.logger.debug("Created JTAGICE3 protocol")
    self.supports_trailing_status = supports_trailing_status
|
||||
|
||||
def check_response(self, response, expected=None):
    """
    Checks the response for known errors

    :param response: response bytes
    :param expected: expected response
    :return: data from response
    :raises Jtagice3ResponseError: if the debugger reported a failure code
    """
    ok, payload = self.peel_response(response, expected)
    if ok:
        return payload

    # Failure: translate the code, log it, and raise
    failure_code = payload[0]
    error_message = self.error_as_string(failure_code)
    self.logger.error("JTAGICE3 error response code 0x{:02X}: '{:s}' ".format(failure_code, error_message))
    raise Jtagice3ResponseError(error_message, failure_code)
|
||||
|
||||
def error_as_string(self, code):
    """
    Get the response error as a string (error code translated to descriptive string)

    :param code: error code
    :return: error code as descriptive string
    """
    return self.JTAGICE3_ERRORS.get(code, "Unknown error!")
|
||||
|
||||
def peel_response(self, response, expected=None):
    """
    Process the response, extracting error codes and data

    Byte 0 of the response is the response ID; payload data starts at byte 2
    (byte 1 is skipped throughout — presumably a version/reserved byte, TODO
    confirm against the protocol specification).

    :param response: raw response bytes
    :param expected: expected response
    :return: status, data
    """
    # Default: failure with a placeholder error code, used when no branch matches
    return_list = False, [0xFF]
    # Special handling: caller-specified expected response ID wins
    if expected is not None and response[0] == expected:
        return_list = True, response[2:]
    else:
        if response[0] == self.PROTOCOL_OK:
            # OK carries no payload
            return_list = True, []
        elif response[0] == self.PROTOCOL_LIST:
            return_list = True, response[2:]
        elif response[0] == self.PROTOCOL_DATA:
            # Trailing status is not included on some handlers
            if self.supports_trailing_status and response[-1] == self.FAILURE_OK:
                return_list = True, response[2:-1]
            else:
                return_list = False, [response[-1]]
        elif response[0] == self.PROTOCOL_FAILED:
            # Failure code lives at byte 2
            return_list = False, [response[2]]

    return return_list
|
||||
|
||||
def query(self, context):
    """
    Queries functionality using the QUERY API

    :param context: Query context
    :return: List of supported entries
    :raises PyedbglibError: if the query fails
    """
    self.logger.debug("Query to context 0x{:02X}".format(context))
    resp = self.jtagice3_command_response([self.CMD_QUERY, self.CMD_VERSION0, context])
    status, data = self.peel_response(resp)
    if not status:
        msg = "Unable to QUERY (failure code 0x{:02X})".format(data[0])
        raise PyedbglibError(msg)
    return data
|
||||
|
||||
def set_byte(self, context, offset, value):
    """
    Sets a single byte parameter

    :param context: context (address) to set
    :param offset: offset address to set
    :param value: value to set
    :return:
    """
    self._set_protocol(context, offset, bytearray([value]))
|
||||
|
||||
def set_le16(self, context, offset, value):
    """
    Sets a little-endian 16-bit parameter

    :param context: context (address) to set
    :param offset: offset address to set
    :param value: value to set
    """
    self._set_protocol(context, offset, binary.pack_le16(value))
|
||||
|
||||
def set_le32(self, context, offset, value):
    """
    Sets a little-endian 32-bit parameter

    :param context: context (address) to set
    :param offset: offset address to set
    :param value: value to set (32-bit, sent little-endian on the wire)
    """
    self._set_protocol(context, offset, binary.pack_le32(value))
|
||||
|
||||
def _set_protocol(self, context, offset, data):
    """
    Generic function for setting parameters

    :param context: context (address) to set
    :param offset: offset address to set
    :param data: values to set
    :raises PyedbglibError: if the SET command fails
    """
    self.logger.debug("JTAGICE3::set {:d} byte(s) to context {:d} offset {:d}".format(len(data),
                                                                                      context,
                                                                                      offset))
    # SET command header followed by the payload itself
    command = bytearray([self.CMD_SET, self.CMD_VERSION0, context, offset, len(data)]) + data
    ok, payload = self.peel_response(self.jtagice3_command_response(command))
    if not ok:
        raise PyedbglibError("Unable to SET (failure code 0x{:02X})".format(payload[0]))
|
||||
|
||||
def get_byte(self, context, offset):
    """
    Get a single-byte parameter

    :param context: context (address) to read
    :param offset: offset address to read
    :return: value read (a single byte)
    """
    return self._get_protocol(context, offset, 1)[0]
|
||||
|
||||
def get_le16(self, context, offset):
    """
    Get a little-endian 16-bit parameter

    :param context: context (address) to read
    :param offset: offset address to read
    :return: value read
    """
    return binary.unpack_le16(self._get_protocol(context, offset, 2))
|
||||
|
||||
def get_le32(self, context, offset):
    """
    Get a little-endian 32-bit parameter

    :param context: context (address) to read
    :param offset: offset address to read
    :return: value read
    """
    return binary.unpack_le32(self._get_protocol(context, offset, 4))
|
||||
|
||||
def _get_protocol(self, context, offset, numbytes):
    """
    Generic function to get a parameter

    :param context: context (address) to read
    :param offset: offset address to read
    :param numbytes: number of bytes to get
    :return: payload bytes read
    :raises Jtagice3ResponseError: if the GET command fails
    """
    self.logger.debug("JTAGICE3::get {:d} byte(s) from context {:d} offset {:d}".format(numbytes, context, offset))
    ok, payload = self.peel_response(
        self.jtagice3_command_response([self.CMD_GET, self.CMD_VERSION0, context, offset, numbytes]))
    if not ok:
        raise Jtagice3ResponseError("Unable to GET (failure code 0x{:02X})".format(payload[0]), payload)
    return payload
|
||||
21
software/tools/pymcuprog/libs/pyedbglib/pyedbglib_errors.py
Normal file
21
software/tools/pymcuprog/libs/pyedbglib/pyedbglib_errors.py
Normal file
@@ -0,0 +1,21 @@
|
||||
"""
|
||||
pyedbglib specific exceptions
|
||||
"""
|
||||
|
||||
class PyedbglibError(Exception):
    """
    Base class for all pyedbglib specific exceptions
    """

    def __init__(self, msg=None, code=0):
        """
        :param msg: human-readable error message
        :param code: numeric failure code from the protocol layer (0 when not applicable)
        """
        super(PyedbglibError, self).__init__(msg)
        self.code = code
|
||||
|
||||
class PyedbglibNotSupportedError(PyedbglibError):
    """
    Signals that an attempted operation is not supported
    """

    def __init__(self, msg=None, code=0):
        """
        :param msg: human-readable error message
        :param code: numeric failure code (0 when not applicable)
        """
        # Let the base class store both message and failure code instead of
        # redundantly re-assigning self.code here
        super(PyedbglibNotSupportedError, self).__init__(msg, code)
|
||||
146
software/tools/pymcuprog/libs/pyedbglib/util/binary.py
Normal file
146
software/tools/pymcuprog/libs/pyedbglib/util/binary.py
Normal file
@@ -0,0 +1,146 @@
|
||||
"""Packing and unpacking numbers into bytearrays of 8-bit values with various endian encodings"""
|
||||
|
||||
from numbers import Integral
|
||||
|
||||
def _check_input_value(value, bits):
|
||||
"""
|
||||
:param value: An integer
|
||||
:param bits: Number of bits used to represent this integer
|
||||
:return: Raises an OverflowError if the value is too large
|
||||
"""
|
||||
# Be sure to support both py2 and py3
|
||||
if not isinstance(value, Integral):
|
||||
raise TypeError("The input {} is not an Integral type".format(value))
|
||||
|
||||
if value > (2 ** bits) - 1:
|
||||
raise OverflowError("Value {} is larger than the maximum value {}".format(value, (2 ** bits) - 1))
|
||||
|
||||
|
||||
def pack_le32(value):
    """
    :param value: input value
    :return: 32-bit little endian bytearray representation of the input value
    """
    _check_input_value(value, 32)
    # Least significant byte first
    return bytearray((value >> shift) & 0xFF for shift in (0, 8, 16, 24))
|
||||
|
||||
|
||||
def pack_be32(value):
    """
    :param value: input value
    :return: 32-bit big endian bytearray representation of the input value
    """
    _check_input_value(value, 32)
    # Most significant byte first
    return bytearray((value >> shift) & 0xFF for shift in (24, 16, 8, 0))
|
||||
|
||||
|
||||
def pack_le24(value):
    """
    :param value: input value
    :return: 24-bit little endian bytearray representation of the input value
    """
    _check_input_value(value, 24)
    # Least significant byte first
    return bytearray((value >> shift) & 0xFF for shift in (0, 8, 16))
|
||||
|
||||
|
||||
def pack_be24(value):
    """
    :param value: input value
    :return: 24-bit big endian bytearray representation of the input value
    """
    _check_input_value(value, 24)
    # Most significant byte first
    return bytearray((value >> shift) & 0xFF for shift in (16, 8, 0))
|
||||
|
||||
|
||||
def pack_le16(value):
    """
    :param value: input value
    :return: 16-bit little endian bytearray representation of the input value
    """
    _check_input_value(value, 16)
    # Least significant byte first
    return bytearray((value >> shift) & 0xFF for shift in (0, 8))
|
||||
|
||||
|
||||
def pack_be16(value):
    """
    :param value: input value
    :return: 16-bit big endian bytearray representation of the input value
    """
    _check_input_value(value, 16)
    # Most significant byte first
    return bytearray((value >> shift) & 0xFF for shift in (8, 0))
|
||||
|
||||
|
||||
def _check_input_array(data, length):
|
||||
"""
|
||||
Used to check if a bytearray or list of 8-bit values has the correct length to convert to an integer
|
||||
|
||||
:param data: bytearray (or list) representing a value
|
||||
:param length: Expected length of the list
|
||||
:return: Raises a ValueError if len(data) is not the same as length
|
||||
"""
|
||||
if not isinstance(data, (list, bytearray)):
|
||||
raise TypeError("The input {} is not a list of bytearray".format(data))
|
||||
|
||||
if len(data) != length:
|
||||
raise ValueError("Input data {} does not have length {}".format(data, length))
|
||||
|
||||
|
||||
def unpack_le32(data):
    """
    :param data: 32-bit little endian bytearray representation of an integer
    :return: integer value
    """
    _check_input_array(data, 4)
    # Byte 0 is least significant
    return sum(byte << (8 * index) for index, byte in enumerate(data))
|
||||
|
||||
|
||||
def unpack_be32(data):
    """
    :param data: 32-bit big endian bytearray representation of an integer
    :return: integer value
    """
    _check_input_array(data, 4)
    # Byte 0 is most significant, so accumulate over the reversed sequence
    return sum(byte << (8 * index) for index, byte in enumerate(reversed(data)))
|
||||
|
||||
|
||||
def unpack_le24(data):
    """
    :param data: 24-bit little endian bytearray representation of an integer
    :return: integer value
    """
    _check_input_array(data, 3)
    # Byte 0 is least significant
    return sum(byte << (8 * index) for index, byte in enumerate(data))
|
||||
|
||||
|
||||
def unpack_be24(data):
    """
    :param data: 24-bit big endian bytearray representation of an integer
    :return: integer value
    """
    _check_input_array(data, 3)
    # Byte 0 is most significant, so accumulate over the reversed sequence
    return sum(byte << (8 * index) for index, byte in enumerate(reversed(data)))
|
||||
|
||||
|
||||
def unpack_le16(data):
    """
    :param data: 16-bit little endian bytearray representation of an integer
    :return: integer value
    """
    _check_input_array(data, 2)
    # Byte values are < 256, so bitwise OR is equivalent to addition here
    return data[0] | (data[1] << 8)
|
||||
|
||||
|
||||
def unpack_be16(data):
    """
    :param data: 16-bit big endian bytearray representation of an integer
    :return: integer value
    """
    _check_input_array(data, 2)
    # Byte values are < 256, so bitwise OR is equivalent to addition here
    return data[1] | (data[0] << 8)
|
||||
@@ -0,0 +1,8 @@
|
||||
"""Generating string representations of variables for nice printouts"""
|
||||
|
||||
def bytelist_to_hex_string(bytelist):
    """
    :param bytelist: list of byte values
    :return: String representation of the bytelist with each item as a byte value on the format 0xXX
    """
    formatted_items = ["0x%02X" % item for item in bytelist]
    return '[' + ', '.join(formatted_items) + ']'
|
||||
81
software/tools/pymcuprog/libs/pymcuprog/__init__.py
Normal file
81
software/tools/pymcuprog/libs/pymcuprog/__init__.py
Normal file
@@ -0,0 +1,81 @@
|
||||
"""
|
||||
Python MCU programmer utility
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
pymcuprog is a utility for programming various Microchip MCU devices using Microchip CMSIS-DAP based debuggers
|
||||
|
||||
pymcuprog can be used as a library using its "backend API". For example:
|
||||
|
||||
Setup logging - pymcuprog uses the Python logging module
|
||||
>>> import logging
|
||||
>>> logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING)
|
||||
|
||||
Configure the session:
|
||||
>>> from pymcuprog.backend import SessionConfig
|
||||
>>> sessionconfig = SessionConfig("atmega4808")
|
||||
|
||||
Instantiate USB transport (only 1 tool connected)
|
||||
>>> from pymcuprog.toolconnection import ToolUsbHidConnection
|
||||
>>> transport = ToolUsbHidConnection()
|
||||
|
||||
Instantiate backend
|
||||
>>> from pymcuprog.backend import Backend
|
||||
>>> backend = Backend()
|
||||
|
||||
Connect to tool using transport
|
||||
>>> backend.connect_to_tool(transport)
|
||||
|
||||
Start the session
|
||||
>>> backend.start_session(sessionconfig)
|
||||
|
||||
Read the target device_id
|
||||
>>> device_id = backend.read_device_id()
|
||||
>>> print("Device ID is {0:06X}".format(int.from_bytes(device_id, byteorder="little")))
|
||||
|
||||
Print the pymcuprog package version:
|
||||
>>> from pymcuprog.version import VERSION as pymcuprog_version
|
||||
>>> print("pymcuprog version {}".format(pymcuprog_version))
|
||||
|
||||
In addition, the CLI-backend API is versioned for convenience:
|
||||
>>> print("pymcuprog backend API version: {}".format(backend.get_api_version()))
|
||||
|
||||
Logging
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
This package uses the Python logging module for publishing log messages to library users.
|
||||
A basic configuration can be used (see example), but for best results a more thorough configuration is
|
||||
recommended in order to control the verbosity of output from dependencies in the stack which also use logging.
|
||||
See logging.yaml which is included in the package (although only used for CLI)
|
||||
|
||||
Dependencies
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
pymcuprog depends on pyedbglib for its transport protocol.
|
||||
pyedbglib requires a USB transport library like libusb. See pyedbglib package for more information.
|
||||
|
||||
Supported devices and tools
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Note: pymcuprog is primarily intended for use with PKOB nano (nEDBG) debuggers which
|
||||
are found on Curiosity Nano kits and other development boards. This means that it is
|
||||
continuously tested with a selection of AVR devices with UPDI interface as well as a
|
||||
selection of PIC devices. However since the protocol is compatible between all
|
||||
EDBG-based debuggers (pyedbglib) it is possible to use pymcuprog with a wide range of
|
||||
debuggers and devices, although not all device families/interfaces have been implemented.
|
||||
|
||||
The following Atmel/Microchip debuggers are supported:
|
||||
* JTAGICE3 (only firmware version 3.x)
|
||||
* Atmel-ICE
|
||||
* Power Debugger
|
||||
* EDBG
|
||||
* mEDBG
|
||||
* PKOB nano (nEDBG)
|
||||
* MPLAB PICkit 4 ICD (only when in 'AVR mode')
|
||||
* MPLAB Snap ICD (only when in 'AVR mode')
|
||||
|
||||
Not all functionality is provided on all boards. See device support below.
|
||||
|
||||
The following device-types are supported:
|
||||
* All UPDI devices, whether mounted on kits or standalone
|
||||
* PIC devices mounted on Curiosity Nano kits, or similar board with PKOB nano (nEDBG) debugger
|
||||
* Other devices (eg ATmega328P, ATsamd21e18a) may be partially supported for experimental purposes
|
||||
"""
|
||||
import logging

# Install a NullHandler so that importing this library does not emit
# "No handlers could be found" warnings when the consumer has not configured logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
||||
658
software/tools/pymcuprog/libs/pymcuprog/backend.py
Normal file
658
software/tools/pymcuprog/libs/pymcuprog/backend.py
Normal file
@@ -0,0 +1,658 @@
|
||||
"""
|
||||
Backend interface for the pymcuprog utility.
|
||||
|
||||
This module is the boundary between the Command Line Interface (CLI) part and
|
||||
the backend part that does the actual job. Any external utility or script that
|
||||
needs access to the functionality provided by pymcuprog should connect to the
|
||||
interface provided by this backend module
|
||||
"""
|
||||
# Python 3 compatibility for Python 2
|
||||
from __future__ import print_function
|
||||
|
||||
import os
|
||||
from logging import getLogger
|
||||
|
||||
# pyedbglib dependencies
|
||||
from pyedbglib.hidtransport.hidtransportfactory import hid_transport
|
||||
from pyedbglib.hidtransport.hidtransportbase import HidTransportBase
|
||||
from pyedbglib.protocols import housekeepingprotocol
|
||||
from pyedbglib.protocols.jtagice3protocol import Jtagice3ResponseError
|
||||
|
||||
from .pymcuprog_errors import PymcuprogToolConfigurationError, PymcuprogToolConnectionError
|
||||
from .pymcuprog_errors import PymcuprogNotSupportedError, PymcuprogEraseError
|
||||
from .pymcuprog_errors import PymcuprogSessionConfigError, PymcuprogSessionError
|
||||
from .programmer import Programmer
|
||||
from .deviceinfo import deviceinfo
|
||||
from .deviceinfo.memorynames import MemoryNames
|
||||
from .deviceinfo.memorynames import MemoryNameAliases
|
||||
from .deviceinfo.eraseflags import ChiperaseEffect
|
||||
from .deviceinfo.deviceinfokeys import DeviceInfoKeys, DeviceMemoryInfoKeys
|
||||
from .toolconnection import ToolUsbHidConnection, ToolSerialConnection
|
||||
from .utils import read_tool_info
|
||||
from .utils import read_target_voltage, read_supply_voltage_setpoint, read_usb_voltage
|
||||
from .utils import set_supply_voltage_setpoint
|
||||
from .hexfileutils import read_memories_from_hex
|
||||
|
||||
# Files in devices folder not representing devices
NON_DEVICEFILES = ["__init__.py"]
# Use os.path.join instead of hard-coded "//" separators so the path is
# built with the platform's canonical separator
DEVICE_FOLDER = os.path.join(os.path.dirname(os.path.abspath(__file__)), "deviceinfo", "devices")
|
||||
|
||||
# This class is a collection of parameters so no need for any methods
|
||||
#pylint: disable=too-few-public-methods
|
||||
class SessionConfig(object):
    """
    Collection of all parameters needed when configuring a programming session

    Used as input parameter for the start_session function
    """
    # Target device name (mandatory)
    device = None
    # Physical interface to use
    interface = None
    # Baud in bits per second or clock frequency in Hz, depending on the interface
    interface_speed = None
    # Path to python devicesupportscripts for PIC devices
    packpath = None
    # Device-stack-specific options; content and format depend on the device stack
    # implementation and these are normally not in use
    special_options = None

    def __init__(self, device):
        """
        :param device: device name (mandatory)
        """
        self.device = device
|
||||
|
||||
# To achieve a single entry point for users of the backend part of pymcuprog it is accepted to exceed the maximum
|
||||
# number of methods.
|
||||
#pylint: disable=too-many-public-methods
|
||||
class Backend(object):
|
||||
"""
|
||||
Backend interface of the pymcuprog utility.
|
||||
This class provides access to all the functionality provided by pymcuprog
|
||||
"""
|
||||
API_VERSION = '2.0'
|
||||
|
||||
def __init__(self):
    # Hook onto logger
    self.logger = getLogger(__name__)
    # Transport/tool state, populated by connect_to_tool
    self.transport = None
    self.housekeeper = None
    self.connected_to_tool = False
    # Session state, populated by start_session
    self.session_active = False
    self.programmer = None
    self.device_memory_info = None
|
||||
|
||||
def get_api_version(self):
    """
    Returns the current pymcuprog API version

    :return: API version string (Backend.API_VERSION)
    """
    return self.API_VERSION
|
||||
|
||||
@staticmethod
def get_supported_devices():
    """
    Return a list of devices supported by pymcuprog.

    This will be the list of devices with a corresponding device file

    :returns: List of device names
    """
    # os.path.splitext is used instead of split('.py')[0], which truncates
    # incorrectly when the device name itself contains the substring '.py'
    return [os.path.splitext(filename)[0]
            for filename in os.listdir(DEVICE_FOLDER)
            if filename not in NON_DEVICEFILES and filename.endswith('.py')]
|
||||
|
||||
@staticmethod
def get_available_hid_tools(serialnumber_substring='', tool_name=None):
    """
    Return a list of Microchip USB HID tools (debuggers) connected to the host

    :param serialnumber_substring: can be an empty string or a subset of a serial number. Not case sensitive.
        Matching is done against the last part of the devices' serial numbers. Examples:
        '123' will match "MCHP3252000000043123" but not "MCP32520001230000000";
        '' will match any serial number
    :param tool_name: tool type to connect to. If None any tool matching the serialnumber_substring
        will be returned
    :returns: List of pyedbglib.hidtransport.hidtransportbase.HidTool objects
    """
    # A temporary transport is sufficient: we only enumerate tools here,
    # we never connect to any of them
    return hid_transport().get_matching_tools(serialnumber_substring, tool_name)
|
||||
|
||||
def connect_to_tool(self, toolconnection):
    """
    Connect to a tool

    The tool can either be a USB HID tool or a serial port.

    :param toolconnection: This is an instance of one of the ToolConnection sub-classes. This object wraps
        parameters needed to identify which tool to connect to like tool name and USB serial or serial port
        name (e.g. 'COM1').

        For USB HID tools there are some special handling:

        - If both tool name and usb_serial are None any tool will be picked.
        - If usb_serial is None any tool matching the tool name will be picked
        - If tool name is None any tool matching the usb_serial will be picked
        - If more than one tool is connected that matches the tool name and usb_serial parameters a
          PymcuprogToolConnectionError exception will be raised.

    :raises: PymcuprogToolConnectionError if more than one matching tool is found or if no matching tool is found
    :raises: PymcuprogToolConfigurationError if the toolconnection configuration is incorrect
    """
    if isinstance(toolconnection, ToolSerialConnection):
        # For serial port connection no connection action is needed, just need to store the
        # Serial port number to be used (e.g. 'COM1')
        self.transport = toolconnection.serialport
    elif isinstance(toolconnection, ToolUsbHidConnection):
        self.transport = hid_transport()
        connect_status = False
        try:
            connect_status = self.transport.connect(serial_number=toolconnection.serialnumber,
                                                    product=toolconnection.tool_name)
        except IOError as error:
            raise PymcuprogToolConnectionError("Unable to connect to USB device ({})".format(error))

        if not connect_status:
            raise PymcuprogToolConnectionError("Unable to connect to USB device")

        # USB HID tools need a housekeeping session before they can be used
        self.housekeeper = housekeepingprotocol.Jtagice3HousekeepingProtocol(self.transport)
        self.housekeeper.start_session()

    else:
        raise PymcuprogToolConfigurationError("Unknown toolconnection argument type: {})".
                                              format(type(toolconnection)))

    self.connected_to_tool = True
|
||||
|
||||
def disconnect_from_tool(self):
    """
    Disconnect the connected tool

    If no tool is connected nothing is done (i.e. no exception raised when not connected)
    """
    # Only USB HID tools hold a session/transport that must be torn down;
    # serial-port "connections" are just a stored port name
    if self._is_connected_to_hid_tool():
        self.housekeeper.end_session()
        self.transport.disconnect()

    self.connected_to_tool = False
|
||||
|
||||
def read_tool_info(self):
    """
    Interrogates tool (debugger) for useful info

    :returns: Dictionary with various info about the connected debugger

    :raises PymcuprogToolConnectionError: if not connected to any USB HID tool (connect_to_tool not run)
    """
    self._is_hid_tool_not_connected_raise()

    # Delegates to the module-level read_tool_info helper imported from .utils
    return read_tool_info(self.housekeeper)
|
||||
|
||||
def read_kit_device(self):
    """
    Read out the device name from kit configuration.

    If the connected tool does not have any kit configuration
    (i.e. the tool is not an onboard debugger) None will be returned.
    connect_to_tool must have been called before calling read_kit_device, but start_session is not necessary.
    Typically read_kit_device is used to get the device name required to configure a session before calling
    start_session.

    :returns: Name of target device as given by the kit, None if the tool does not have any device configured.

    :raises PymcuprogToolConnectionError: if not connected to any USB HID tool (connect_to_tool not run)
    """
    self._is_hid_tool_not_connected_raise()

    dap_info = read_tool_info(self.housekeeper)
    kit_device = dap_info['device_name'].lower()

    # An empty name means the kit has no device configured
    return kit_device if kit_device else None
|
||||
|
||||
def read_target_voltage(self):
    """
    Read target voltage

    :returns: Measured target voltage

    :raises: PymcuprogToolConnectionError if not connected to any tool (connect_to_tool not run)
    :raises: PymcuprogNotSupportedError if the tool does not have supply capabilities
    """
    self._is_hid_tool_not_connected_raise()

    try:
        # Delegates to the module-level helper imported from .utils
        return read_target_voltage(self.housekeeper)
    except Jtagice3ResponseError:
        raise PymcuprogNotSupportedError("Connected debugger/board does not have target voltage read capability")
|
||||
|
||||
def read_supply_voltage_setpoint(self):
    """
    Read tool power supply voltage setpoint

    :returns: Tool power supply voltage setpoint

    :raises: PymcuprogToolConnectionError if not connected to any tool (connect_to_tool not run)
    :raises: PymcuprogNotSupportedError if the tool does not have supply capabilities
    """
    self._is_hid_tool_not_connected_raise()

    try:
        # Delegates to the module-level helper imported from .utils
        return read_supply_voltage_setpoint(self.housekeeper)
    except Jtagice3ResponseError:
        raise PymcuprogNotSupportedError("Connected debugger/board does not have supply voltage capability.")
|
||||
|
||||
def read_usb_voltage(self):
    """
    Read USB voltage

    :returns: Measured USB voltage

    :raises: PymcuprogToolConnectionError if not connected to any tool (connect_to_tool not run)
    :raises: PymcuprogNotSupportedError if the tool can't measure USB voltage
    """
    self._is_hid_tool_not_connected_raise()

    try:
        # Delegates to the module-level helper imported from .utils
        return read_usb_voltage(self.housekeeper)
    except Jtagice3ResponseError:
        raise PymcuprogNotSupportedError("Connected debugger/board does not have USB voltage read capability.")
|
||||
|
||||
def set_supply_voltage_setpoint(self, setpoint):
    """
    Set tool power supply voltage setpoint

    :param setpoint: Power supply setpoint

    :raises: PymcuprogToolConnectionError if not connected to any tool (connect_to_tool not run)
    :raises: PymcuprogNotSupportedError if the tool does not have supply capabilities
    :raises: ValueError if the setpoint is out of range
    """
    self._is_hid_tool_not_connected_raise()

    # Delegates to the module-level helper imported from .utils
    set_supply_voltage_setpoint(self.housekeeper, setpoint)
|
||||
|
||||
|
||||
def reboot_tool(self):
    """
    Trigger a reboot of the tool (debugger)

    :raises: PymcuprogToolConnectionError if not connected to any tool (connect_to_tool not run)
    """
    self._is_hid_tool_not_connected_raise()

    # reset_tool=True asks the housekeeping session to reset the debugger on exit
    self.housekeeper.end_session(reset_tool=True)

    # A tool reboot will automatically disconnect the tool. Calling self.disconnect_from_tool
    # would just fail as it would try to talk to a tool while it is rebooting
    self.connected_to_tool = False
|
||||
|
||||
@staticmethod
def get_device_info(device):
    """
    Get info about a device

    :param device: Name of the device
    :returns: dictionary with device info as defined in the device files in pymcuprog.deviceinfo.devices

    :raises: PymcuprogNotSupportedError if device is not supported
    """
    try:
        # Device info modules are looked up dynamically by name
        return deviceinfo.getdeviceinfo(device)
    except ModuleNotFoundError:
        raise PymcuprogNotSupportedError("No device info for device: {}".format(device))
|
||||
|
||||
def start_session(self, sessionconfig, user_interaction_callback=None):
    """
    Start a programming session.

    This function will build the device model stack and initialize the tool for a
    programming session. If a session is already started calling start_session will do an end_session and start
    a new session from scratch.

    Note connect_to_tool must have been called before start_session is called. If not an exception will be thrown.

    :param sessionconfig: SessionConfig object wrapping the parameters configuring the session
    :param user_interaction_callback: Callback to be called when user interaction is required,
        for example when doing UPDI high-voltage activation with user target power toggle.
        This function could ask the user to toggle power and halt execution waiting for the user
        to respond (this is default behavior if the callback is None), or if the user is another
        script it could toggle power automatically and then return.

    :raises: PymcuprogSessionConfigError if starting the session failed due to incorrectly configured session
    :raises: PymcuprogToolConnectionError if not connected to any tool (connect_to_tool not run)
    :raises: PymcuprogDeviceLockedError if unable to start the session due to the device being locked
    :raises: PymcuprogNotSupportedError if configured device is not supported
    """
    self._is_tool_not_connected_raise()

    # Check that all session configuration parameters required are in place
    if sessionconfig.device is None or sessionconfig.device == '':
        raise PymcuprogSessionConfigError("Device must be specified")

    if self.session_active:
        # A session is already active so it must be ended before starting a new session
        self.end_session()

    # Setup the programmer
    self.programmer = Programmer(self.transport)

    # Special options (if any) must be applied before the device stack is built
    if sessionconfig.special_options is not None:
        self.programmer.set_options(sessionconfig.special_options)

    # Try to build the stack for this device
    self.programmer.load_device(sessionconfig.device)

    self.programmer.setup_device(
        sessionconfig.interface,
        sessionconfig.packpath,
        sessionconfig.interface_speed)

    # Make contact
    self.programmer.start(user_interaction_callback=user_interaction_callback)

    # Get device memory info
    self.device_memory_info = self.programmer.get_device_memory_info()

    # Only flag the session active once every step above has succeeded
    self.session_active = True
|
||||
|
||||
def end_session(self):
    """
    End a programming session

    This will take down the device model stack and stop the programming session on the tool. However the tool will
    not be disconnected and it will be possible to do another start_session without another connect_to_tool call.
    If no session has been started this function will do nothing (i.e. it won't fail even if a session has
    not been started)
    """
    if self.session_active:
        # Lower the flag first to ensure it is updated as the rest of this function might fail with an exception
        # for example if UPDI were disabled during the session
        self.session_active = False
        self.programmer.stop()
|
||||
|
||||
def read_device_id(self):
    """
    Read out the device id

    :return: Byte array with device ID as raw byte values. Number of bytes will depend upon target type

    :raises: PymcuprogToolConnectionError if not connected to any tool (connect_to_tool not run)
    :raises: PymcuprogSessionError if a session has not been started (session_start not run)
    """
    self._is_tool_not_connected_raise()
    self._is_session_not_active_raise()

    return self.programmer.read_device_id()
|
||||
|
||||
|
||||
def erase(self, memory_name=MemoryNameAliases.ALL, address=None):
    """
    Erase target device memory

    If a single memory is specified it will only be erased if it won't affect other memories

    :param memory_name: name of memory to erase. To unlock a device use the MemoryNameAliases.ALL
        MemoryNameAliases.ALL run the widest erase:

        - For PIC the widest bulk erase will be run.
        - For AVR a chip erase will be run
        - The following memories will not be erased:
            - AVR fuses
            - EEPROM if EESAVE fuse is set for AVR
            - EEPROM if the target device does not support EEPROM erase
            - EEPROM if Data Code Protection (CPD_n) is not enabled for PIC
            - PIC ICD memory (special memory used for Debug Executives)
    :param address: optional address for erase command. If address is None the complete memory
        segment will be erased. Note that the address parameter will just propagate through the stack down to the
        device dependent implementation (devicesupportscripts for PIC and firmware for AVR). Normal use is to
        leave the address as None.

    :raises: PymcuprogToolConnectionError if not connected to any tool (connect_to_tool not run)
    :raises: PymcuprogSessionError if a session has not been started (session_start not run)
    :raises: ValueError if the specified memory is not defined for the target device
    :raises: PymcuprogEraseError if the memory can't be erased or if the memory can't be erased without affecting
        other memories
    """
    self._is_tool_not_connected_raise()
    self._is_session_not_active_raise()

    # Erasing a single named memory is only allowed when it has no side effects on other memories
    if memory_name is not None and memory_name != MemoryNameAliases.ALL:
        if not self.is_isolated_erase_possible(memory_name):
            message = "{} memory can't be erased or can't be erased without side effect".format(memory_name)
            raise PymcuprogEraseError(message)

    self.programmer.erase(memory_name, address)
|
||||
|
||||
def is_isolated_erase_possible(self, memory_name):
    """
    Can the memory be erased without affecting other memories?

    :param memory_name: name of memory
    :return: True only if the memory can be erased without side effects, False if memory can't be erased at all or
        if erasing it will erase other memories too.

    :raises ValueError if memory is not defined for the configured device
    """
    # The device model must have been loaded upfront
    self._is_session_not_active_raise()

    memory_model = self.device_memory_info.memory_info_by_name(memory_name)
    flag_key = DeviceMemoryInfoKeys.ISOLATED_ERASE
    if flag_key not in memory_model:
        # A device model without the flag is treated conservatively: not isolated-erasable
        self.logger.error('%s flag not found for %s memory', flag_key, memory_name)
        return False

    # Only an explicit boolean True counts as erasable in isolation
    return memory_model[flag_key] is True
def get_chiperase_effect(self, memory_name):
    """
    Get the effect of a chip erase (widest bulk erase) on the given memory

    :param memory_name: name of memory
    :return: One of the values defined by deviceinfo.eraseflags.ChiperaseEffect depending upon the settings in the
        device model for the configured device. If the chiperase_effect flag is missing in the device model
        ChiperaseEffect.NOT_ERASED will be returned.

    :raises ValueError if memory is not defined for the configured device
    """
    # The device model must have been loaded upfront
    self._is_session_not_active_raise()

    memory_model = self.device_memory_info.memory_info_by_name(memory_name)
    effect_key = DeviceMemoryInfoKeys.CHIPERASE_EFFECT
    if effect_key not in memory_model:
        # Missing flag: fall back to the safest answer, "not erased"
        self.logger.error('%s flag not found for %s memory', effect_key, memory_name)
        return ChiperaseEffect.NOT_ERASED

    return memory_model[effect_key]
def read_memory(self, memory_name=MemoryNameAliases.ALL, offset_byte=0, numbytes=0, max_chunk_size=None):
    """
    Read target device memory

    :param memory_name: Name of memory as defined in memorynames.py. MemoryNameAliases.ALL reads all memories
        defined in the device model (numbytes and offset_byte will be ignored).
    :param offset_byte: Byte offset within memory to start reading at.
    :param numbytes: Number of bytes to read. 0 means read all memory locations from offset_byte and until end
        of memory
    :param max_chunk_size: Optional upper bound on the number of bytes read per transaction; passed
        through to the programmer layer. None means the lower layers decide.
    :return: list of namedtuples with two fields: data and memory_info. data contains a byte array of
        raw data bytes and memory_info is a dictionary with memory information (as defined in
        deviceinfo.deviceinfo.DeviceMemoryInfo). Normally the list will contain one item, but when
        memory_name parameter is MemoryNameAliases.ALL there will be one namedtuple item per memory
        type read.

    :raises: PymcuprogToolConnectionError if not connected to any tool (connect_to_tool not run)
    :raises: PymcuprogSessionError if a session has not been started (session_start not run)
    :raises: ValueError if trying to read outside the specified memory
    :raises: ValueError if the specified memory is not defined for the target device
    """
    self._is_tool_not_connected_raise()
    self._is_session_not_active_raise()

    return self.programmer.read_memory(memory_name=memory_name, offset=offset_byte, numbytes=numbytes, max_chunk_size=max_chunk_size)
def write_memory(self, data, memory_name=MemoryNames.FLASH, offset_byte=0, blocksize=0, pagewrite_delay=0):
    """
    Write target device memory

    :param data: bytearray of raw data bytes to write
    :param memory_name: Name of memory as defined in memorynames.py
    :param offset_byte: Byte offset within memory to start writing to.
    :param blocksize: max number of bytes to send at a time. Ignored if 0 or omitted, and not passed
        to write_memory; only serialupdi supports this.
    :param pagewrite_delay: delay (per page write) forwarded to the programmer layer; 0 means no
        extra delay.

    :raises: PymcuprogToolConnectionError if not connected to any tool (connect_to_tool not run)
    :raises: PymcuprogSessionError if a session has not been started (session_start not run)
    :raises: ValueError if trying to write outside the specified memory
    :raises: ValueError if the specified memory is not defined for the target device
    """
    self._is_tool_not_connected_raise()
    self._is_session_not_active_raise()
    # blocksize is only forwarded when explicitly set; some programmer backends do not accept it
    if blocksize == 0:
        self.programmer.write_memory(data=data, memory_name=memory_name, offset=offset_byte, pagewrite_delay=pagewrite_delay)
    else:
        self.programmer.write_memory(data=data, memory_name=memory_name, offset=offset_byte, blocksize=blocksize, pagewrite_delay=pagewrite_delay)
def verify_memory(self, data, memory_name=MemoryNames.FLASH, offset_byte=0, max_read_chunk=None):
    """
    Verify target device memory

    :param data: bytearray of raw data bytes to verify against
    :param memory_name: Name of memory as defined in DeviceMemoryInfo (deviceinfo.py)
    :param offset_byte: Byte offset within memory to start verifying at.
    :param max_read_chunk: Optional upper bound on the number of bytes read back per transaction;
        passed through to the programmer layer. None means the lower layers decide.
    :return: boolean compare status

    :raises: PymcuprogToolConnectionError if not connected to any tool (connect_to_tool not run)
    :raises: PymcuprogSessionError if a session has not been started (session_start not run)
    :raises: ValueError if trying to verify outside the specified memory
    :raises: ValueError if the specified memory is not defined for the target device
    """
    self._is_tool_not_connected_raise()
    self._is_session_not_active_raise()

    return self.programmer.verify_memory(data=data, memory_name=memory_name, offset=offset_byte, max_read_chunk=max_read_chunk)
def hold_in_reset(self):
    """
    Hold target device in reset

    :raises: PymcuprogToolConnectionError if not connected to any tool (connect_to_tool not run)
    :raises: PymcuprogSessionError if a session has not been started (session_start not run)
    """
    # Both a tool connection and an active session are required before touching reset
    self._is_tool_not_connected_raise()
    self._is_session_not_active_raise()
    self.programmer.hold_in_reset()
def release_from_reset(self):
    """
    Release target device from reset

    :raises: PymcuprogToolConnectionError if not connected to any tool (connect_to_tool not run)
    :raises: PymcuprogSessionError if a session has not been started (session_start not run)
    """
    self._is_tool_not_connected_raise()
    self._is_session_not_active_raise()

    self.programmer.release_from_reset()

    # Releasing the target from reset will take it out of programming mode. In other words the session
    # is partly taken down. To keep housekeeping right and to take down the stack properly end_session
    # must be called
    self.end_session()
def write_hex_to_target(self, hexfile):
    """
    Write hexfile to target device

    Note no erase will be run (i.e. memory is assumed to already be erased)

    :param hexfile: name of file to write
    """
    self._is_tool_not_connected_raise()
    self._is_session_not_active_raise()

    # Split the hex file into per-memory segments and write each one in turn
    for segment in read_memories_from_hex(os.path.abspath(hexfile), self.device_memory_info):
        name = segment.memory_info[DeviceInfoKeys.NAME]
        self.logger.debug("Writing %s...", name)
        self.write_memory(segment.data, name, segment.offset)
def verify_hex(self, hexfile, max_read_chunk=None):
    """
    Verify target memory content against hexfile

    :param hexfile: name of file to verify against
    :param max_read_chunk: optional upper bound on bytes read back per transaction; forwarded
        to verify_memory. None (the default) lets the lower layers decide.
    :return: boolean compare status

    :raises: PymcuprogToolConnectionError if not connected to any tool (connect_to_tool not run)
    :raises: PymcuprogSessionError if a session has not been started (session_start not run)
    """
    self._is_tool_not_connected_raise()
    self._is_session_not_active_raise()

    hex_memories = read_memories_from_hex(os.path.abspath(hexfile), self.device_memory_info)
    verify_ok = True
    for segment in hex_memories:
        memory_name = segment.memory_info[DeviceInfoKeys.NAME]
        self.logger.debug("Verifying %s...", memory_name)
        # BUGFIX: max_read_chunk was referenced here without being defined anywhere in the
        # function, which raised NameError on every call. It is now an explicit keyword
        # parameter (default None), matching the verify_memory signature.
        segment_ok = self.verify_memory(segment.data, memory_name, segment.offset, max_read_chunk=max_read_chunk)
        if segment_ok:
            self.logger.debug("OK!")
        else:
            # Keep verifying remaining segments so all mismatches get logged
            verify_ok = False

    return verify_ok
def _is_tool_not_connected_raise(self):
|
||||
"""
|
||||
Check if any tool is connected and if not raise an exception
|
||||
|
||||
:raises: PymcuprogToolConnectionError if not connected to any tool
|
||||
"""
|
||||
if not self._is_connected_to_hid_tool() and not self._is_connected_to_serialport():
|
||||
raise PymcuprogToolConnectionError("Not connected to any tool")
|
||||
|
||||
def _is_hid_tool_not_connected_raise(self):
|
||||
"""
|
||||
Check if a USB HID tool is connected and if not raise an exception
|
||||
|
||||
:raises: PymcuprogToolConnectionError if not connected to any tool
|
||||
"""
|
||||
if not self._is_connected_to_hid_tool():
|
||||
raise PymcuprogToolConnectionError("Not connected to any USB HID debugger")
|
||||
|
||||
def _is_connected_to_hid_tool(self):
|
||||
"""
|
||||
Check if a connection to a USB HID tool is active
|
||||
"""
|
||||
return self.connected_to_tool and isinstance(self.transport, HidTransportBase)
|
||||
|
||||
def _is_connected_to_serialport(self):
|
||||
"""
|
||||
Check if a connection to a Serial port is active
|
||||
"""
|
||||
# For Serial port communication transport is only set to a string with the name of the serial port
|
||||
# to use (e.g. 'COM1').
|
||||
return self.connected_to_tool and isinstance(self.transport, str)
|
||||
|
||||
def _is_session_not_active_raise(self):
|
||||
"""
|
||||
Check if a programming session is active and if not raise an exception
|
||||
|
||||
:raises: PymcuprogSessionError if programming session not active
|
||||
"""
|
||||
if not self.session_active:
|
||||
raise PymcuprogSessionError("No programming session active")
|
||||
290
software/tools/pymcuprog/libs/pymcuprog/deviceinfo/deviceinfo.py
Normal file
290
software/tools/pymcuprog/libs/pymcuprog/deviceinfo/deviceinfo.py
Normal file
@@ -0,0 +1,290 @@
|
||||
"""
|
||||
deviceinfo.py
|
||||
A simple Device Information service
|
||||
|
||||
Device information is stored in files named <devicename>.py in the devices sub-folder
|
||||
Each device file contains a dict of values
|
||||
These device files are [ideally] generated from DFP information by [running generate_device_info.py | hand]
|
||||
"""
|
||||
# Python 3 compatibility for Python 2
|
||||
from __future__ import print_function
|
||||
|
||||
import os
|
||||
import importlib
|
||||
|
||||
from logging import getLogger
|
||||
|
||||
from pymcuprog.pymcuprog_errors import PymcuprogError
|
||||
from .memorynames import MemoryNames
|
||||
from .deviceinfokeys import DeviceMemoryInfoKeys, DeviceInfoKeys, DeviceInfoKeysPic
|
||||
|
||||
def getdeviceinfo(devicename):
    """
    Looks up device info for a given part

    The device model module is resolved by trying, in order: the local
    deviceinfo.devices package, the installed pymcuprog.deviceinfo.devices
    package, and finally a direct import of the device name itself.

    :param devicename: device to look up (matched case-insensitively)
    :return: device information dict
    :raises ImportError: if no device model module can be found for the part
    """
    logger = getLogger(__name__)
    logger.info("Looking for device %s", devicename)

    devicename = devicename.lower()

    try:
        device_module = importlib.import_module("deviceinfo.devices.{}".format(devicename))
    except ImportError:
        try:
            # When pymcuprog is used as a package in other scripts
            # the deviceinfo module is part of the pymcuprog package
            device_module = importlib.import_module("pymcuprog.deviceinfo.devices.{}".format(devicename))
        except ImportError:
            # Last resort: the device module is importable directly (e.g. already on sys.path)
            device_module = importlib.import_module("{}".format(devicename))

    device_info = getattr(device_module, "DEVICE_INFO")

    # For PIC devices there will be a default_bulk_erase_address outside any memory information
    # This address needs to be converted to byte address
    default_bulk_erase_address_byte = None
    for param in device_info:
        if param.startswith(DeviceInfoKeysPic.DEFAULT_BULK_ERASE_ADDRESS):
            # Check if it's word or byte oriented data
            mul = DeviceMemoryInfo.bytes_or_words(param)
            if mul is not None:
                default_bulk_erase_address_byte = int(device_info[param] * mul)
            else:
                # No _byte/_word suffix: value is used as-is (assumed byte address)
                default_bulk_erase_address_byte = device_info[param]

    if default_bulk_erase_address_byte is not None:
        # Store the normalized byte address under the plain (suffix-less) key
        device_info[DeviceInfoKeysPic.DEFAULT_BULK_ERASE_ADDRESS] = default_bulk_erase_address_byte

    return device_info
def get_supported_devices():
    """
    Return a list of all supported devices

    A device is supported if it has a device model file in the devices folder

    :return: list of device names (device model file names without the .py extension)
    """
    root_folder = os.path.dirname(os.path.abspath(__file__))
    # os.path.join instead of string concatenation with a hard-coded "//" separator,
    # which was non-portable and produced doubled slashes
    devices_folder = os.path.join(root_folder, "devices")
    ignore_list = ['__init__.py']
    device_list = []
    for devicefile in os.listdir(devices_folder):
        if devicefile.endswith(".py") and devicefile not in ignore_list:
            # splitext strips only the extension (split('.')[0] would truncate
            # any name containing a dot)
            devicename, _ = os.path.splitext(devicefile)
            device_list.append(devicename)

    return device_list
class DeviceMemoryInfo:
    """
    API to fetch information about device memory segments

    Parses a device info dict (see getdeviceinfo) into one dictionary per
    memory type, normalizing word-oriented values to byte values and filling
    in defaults for optional parameters.
    """
    def __init__(self, device_info):
        """
        :param device_info: device information dict as produced by a device model file
        """
        self.device = device_info
        self.memtypes = MemoryNames.get_all()

        # hexfile_address is the start address for the memory segment in hex files.
        # PIC and ARM devices usually does not need the parameter as all locations are mapped in a single address space.
        # AVR8 devices does not map all memory types in a single address space.
        # Memory types have defined offsets in hex files as defined below
        self.avr8_hex_file_offsets = {
            MemoryNames.FLASH: 0x000000,
            MemoryNames.EEPROM: 0x810000,
            MemoryNames.FUSES: 0x820000,
            MemoryNames.LOCKBITS: 0x830000,
            MemoryNames.SIGNATURES: 0x840000,
            MemoryNames.USER_ROW: 0x850000
        }

        # erase_address is the address for the erase of the memory.
        # Note that for PIC devices other memories might be erased in the same operation depending on the target,
        # see the programming spec for the target device.

        # erase_address, hexfile_address, hexfile_size and verify mask are optional in the device models.
        # erase_address will be set to the memory address if it's missing.
        # Hex file address will be set to the memory address if it's missing, unless it's an AVR device where
        # the hex file offset is used instead.
        # Hex file size will be set to the memory size if it's missing except for EEPROM on PIC16 devices where
        # the hex file will contain phantom bytes so the hex file will contain twice as many EEPROM bytes as
        # the actual EEPROM in the device
        # verify_mask is set based on architecture
        self.paramtypes = [DeviceMemoryInfoKeys.ADDRESS,
                           DeviceMemoryInfoKeys.SIZE,
                           DeviceMemoryInfoKeys.PAGE_SIZE,
                           DeviceMemoryInfoKeys.WRITE_SIZE,
                           DeviceMemoryInfoKeys.READ_SIZE,
                           DeviceMemoryInfoKeys.VERIFY_MASK,
                           DeviceMemoryInfoKeys.ERASE_ADDRESS,
                           DeviceMemoryInfoKeys.HEXFILE_ADDRESS,
                           DeviceMemoryInfoKeys.HEXFILE_SIZE,
                           DeviceMemoryInfoKeys.CHIPERASE_EFFECT,
                           DeviceMemoryInfoKeys.ISOLATED_ERASE]

        # Per-memory parameter dicts, keyed by memory name
        self.mem_by_name = {}

        # Find information about memory segments
        for param in self.device:
            for mtype in self.memtypes:
                # Does this line describe a memory location?
                if param.startswith(mtype):
                    self._configure_memory_param(mtype, param)

        # erase_address and hexfile_address are optional and should default to the value of the address parameter
        optional_params = [DeviceMemoryInfoKeys.VERIFY_MASK,
                           DeviceMemoryInfoKeys.HEXFILE_ADDRESS,
                           DeviceMemoryInfoKeys.ERASE_ADDRESS,
                           DeviceMemoryInfoKeys.HEXFILE_SIZE]
        for optional_param in optional_params:
            for memtype in self.mem_by_name:
                if optional_param not in self.mem_by_name[memtype]:
                    # Set the verify mask based on architecture
                    if optional_param == DeviceMemoryInfoKeys.VERIFY_MASK:
                        verify_mask = self._get_verify_mask(self.device[DeviceInfoKeys.ARCHITECTURE], memtype)
                        self.mem_by_name[memtype][optional_param] = verify_mask
                    # Set the hexfile_address
                    elif optional_param == DeviceMemoryInfoKeys.HEXFILE_ADDRESS:
                        self._add_hexfile_address(memtype, optional_param)
                    # Set the hexfile_size
                    elif optional_param == DeviceMemoryInfoKeys.HEXFILE_SIZE:
                        self._add_hexfile_size(memtype, optional_param)
                    # Set the erase_address
                    elif optional_param == DeviceMemoryInfoKeys.ERASE_ADDRESS:
                        # By default the erase_address is the same as the address of the memory
                        address = self.mem_by_name[memtype][DeviceMemoryInfoKeys.ADDRESS]
                        self.mem_by_name[memtype][optional_param] = address

    def _configure_memory_param(self, memorytype, param):
        # Parse one "<memorytype>_<paramtype>_<byte|word>" device-info entry into mem_by_name.
        # Check if it's word or byte oriented data
        mul = self.bytes_or_words(param)
        # Create a dict for the memory type if it does not exist
        if not self.mem_by_name.get(memorytype):
            self.mem_by_name[memorytype] = {DeviceMemoryInfoKeys.NAME: memorytype}
        # Parse and store parameter
        for ptype in self.paramtypes:
            if param.startswith("{}_{}".format(memorytype, ptype)):
                if mul is not None:
                    # Word-oriented values are converted to byte values here
                    self.mem_by_name[memorytype][ptype] = int(self.device[param] * mul)
                else:
                    self.mem_by_name[memorytype][ptype] = self.device[param]

    def _add_hexfile_address(self, memorytype, paramname):
        # Inject hex file addresses for AVR memory areas
        if self.device[DeviceInfoKeys.ARCHITECTURE].startswith('avr8'):
            if memorytype in self.avr8_hex_file_offsets:
                self.mem_by_name[memorytype][paramname] = self.avr8_hex_file_offsets[memorytype]
            else:
                # The hexfile_address for memory types that doesn't make sense in a hex file like SRAM
                # and regular I/O space is defined to an address the other memory types will not reach
                self.mem_by_name[memorytype][paramname] = 0xFFFFFF
        # All other memory types are mapped 1 to 1 in the hex file
        else:
            self.mem_by_name[memorytype][paramname] = self.mem_by_name[memorytype][DeviceMemoryInfoKeys.ADDRESS]

    def _add_hexfile_size(self, memorytype, paramname):
        # Default the hex-file size to the actual memory size, except for PIC16 EEPROM (phantom bytes)
        if self.device[DeviceInfoKeys.ARCHITECTURE].startswith('PIC16') and memorytype == MemoryNames.EEPROM:
            # For PIC16 devices there will be one phantom byte in the hex file for each EEPROM byte, so
            # the size of EEPROM in a hex file will be twice the size of the actual EEPROM memory
            self.mem_by_name[memorytype][paramname] = self.mem_by_name[memorytype][DeviceMemoryInfoKeys.SIZE] * 2
        else:
            self.mem_by_name[memorytype][paramname] = self.mem_by_name[memorytype][DeviceMemoryInfoKeys.SIZE]

    @staticmethod
    def _get_verify_mask(architecture, memtype):
        # Returns the per-location byte mask pattern used when verifying the given memory.
        # byte oriented memory
        mask = [0xFF]

        # PIC16 is word addressed and has 14-bit flash, except EEPROM which is byte oriented
        if architecture == 'PIC16' and memtype not in [MemoryNames.EEPROM]:
            mask = [0xFF, 0x3F]

        # PIC18 is word addressed and has 16-bit flash, except EEPROM which is byte oriented
        elif architecture == 'PIC18' and memtype not in [MemoryNames.EEPROM]:
            mask = [0xFF, 0xFF]

        # PIC24 is word addressed and has 24-bit flash, except EEPROM which is word oriented
        elif architecture == 'PIC24':
            if memtype in [MemoryNames.EEPROM]:
                mask = [0xFF, 0xFF]
            else:
                mask = [0xFF, 0xFF, 0xFF, 0x00]

        return mask

    @staticmethod
    def bytes_or_words(address_param):
        """
        Return multiplier for address parameter

        The returned multiplier can be used to convert the address parameter to byte address

        :param address_param: Address parameter (used as key in device info dict)
        :return: Multiplier to convert the address to byte address (1 for byte-oriented,
            2 for word-oriented, None when the key carries no suffix)
        """
        if address_param.endswith("_byte") or address_param.endswith("_bytes"):
            mul = 1
        elif address_param.endswith("_word") or address_param.endswith("_words"):
            mul = 2
        else:
            mul = None
        return mul

    def memory_info_by_address_range(self,
                                     start,
                                     stop,
                                     address_type=DeviceMemoryInfoKeys.ADDRESS,
                                     size_type=DeviceMemoryInfoKeys.SIZE):
        """
        Returns a list of all memories applicable for the address range(start, stop)

        :param start: Start address (byte)
        :param stop: End address (byte)
        :param address_type: Selects between normal addresses and addresses used in hex files
            (address vs hexfile_address)
        :param size_type: Selects between normal size and size used in hexfiles (size vs hexfile_size)
        :raises PymcuprogError: if start > stop (reverse range)
        """
        # We do not support negative memory ranges
        if start > stop:
            raise PymcuprogError("Cannot parse reverse memory range {} to {}".format(start, stop))

        memtypes = []

        # Loop through all known memory types for this device
        for memtype in self.mem_by_name:
            address = self.mem_by_name[memtype][address_type]
            size = self.mem_by_name[memtype][size_type]

            # Check if any of the addresses between start and stop is within the memory type range
            if start < address+size and stop > address:
                memtypes.append(self.mem_by_name[memtype])
        return memtypes

    def memory_info_by_address(self,
                               byte_address,
                               address_type=DeviceMemoryInfoKeys.ADDRESS,
                               size_type=DeviceMemoryInfoKeys.SIZE):
        """
        Returns information about the memory type for a given byte address

        :param byte_address: Memory address to check
        :param address_type: Selects between normal addresses and addresses used in hex files
            (ADDRESS vs HEXFILE_ADDRESS)
        :param size_type: Selects between normal size and size used in hexfiles (size vs hexfile_size)
        :return: memory info dict for the matching memory, or None if no memory contains the address
        :raises PymcuprogError: if two memories overlap at the given address
        """
        memtype = None
        for memory in self.mem_by_name:
            if self.mem_by_name[memory][address_type] <= byte_address < \
                    self.mem_by_name[memory][address_type] + self.mem_by_name[memory][size_type]:
                if memtype is not None:
                    raise PymcuprogError("Duplicate memory area found for byte address '{}'".format(byte_address))
                memtype = self.mem_by_name[memory]
        return memtype

    def memory_info_by_name(self, name):
        """
        Returns information about the requested memory

        :param name: name of memory as defined in memorynames.py
        :return: memory info dict
        :raises ValueError: if the memory is not defined for this device
        """
        memory = self.mem_by_name.get(name)
        if not memory:
            message = "Memory type '{}' not defined for device '{}'".format(name, self.device[DeviceInfoKeys.NAME])
            raise ValueError(message)
        return memory
@@ -0,0 +1,88 @@
|
||||
#pylint: disable=too-few-public-methods
|
||||
"""
|
||||
Definitions of keys for device info dictionaries
|
||||
"""
|
||||
|
||||
class DeviceInfoKeys(object):
    """
    Base class with common device info keys
    """

    NAME = 'name'
    ARCHITECTURE = 'architecture'
    INTERFACE = 'interface'
    DEVICE_ID = 'device_id'

    @classmethod
    def get_all(cls):
        """
        Get a list of all keys

        :return List of all valid keys (baseclass and any subclass keys if run on a subclass)
        """
        # Every non-dunder, non-callable class attribute is a key constant
        return [getattr(cls, attribute) for attribute in dir(cls)
                if not attribute.startswith('__') and not callable(getattr(cls, attribute))]
class DeviceInfoKeysAvr(DeviceInfoKeys):
    """
    Keys specific to AVR device info files
    """

    NVMCTRL_BASE = 'nvmctrl_base'      # base address of the NVM controller
    SYSCFG_BASE = 'syscfg_base'        # base address of the system configuration block
    OCD_BASE = 'ocd_base'              # base address of the on-chip debug block
    PROG_CLOCK_KHZ = 'prog_clock_khz'  # programming clock frequency in kHz
    ADDRESS_SIZE = 'address_size'      # address width descriptor (e.g. '16-bit')
class DeviceInfoKeysAvr32(DeviceInfoKeys):
    """
    Keys specific to 32-bit AVR device info files
    """

    RESET_DOMAINS = 'reset_domains'  # reset domain configuration for 32-bit AVR parts
class DeviceInfoKeysPic(DeviceInfoKeys):
    """
    Keys specific to PIC device info files
    """

    # This key should have _byte or _word ending in device info files to specify byte or word address
    # This ending will be removed by the getdeviceinfo function before returning the device info dictionary
    DEFAULT_BULK_ERASE_ADDRESS = 'default_bulk_erase_address'
class DeviceMemoryInfoKeys(object):
    """
    Keys for device memory info dictionary

    These keys are found in the dictionaries returned by DeviceMemoryInfo for each memory type
    """
    NAME = 'name'
    ADDRESS = 'address'
    SIZE = 'size'
    PAGE_SIZE = 'page_size'
    WRITE_SIZE = 'write_size'
    READ_SIZE = 'read_size'
    ERASE_ADDRESS = 'erase_address'
    CHIPERASE_EFFECT = 'chiperase_effect'
    ISOLATED_ERASE = 'isolated_erase'
    HEXFILE_ADDRESS = 'hexfile_address'
    HEXFILE_SIZE = 'hexfile_size'
    VERIFY_MASK = 'verify_mask'

    @classmethod
    def get_all(cls):
        """
        Get a list of all keys

        :return List of all valid keys (baseclass and any subclass keys if run on a subclass)
        """
        # Every non-dunder, non-callable class attribute is a key constant
        return [getattr(cls, attribute) for attribute in dir(cls)
                if not attribute.startswith('__') and not callable(getattr(cls, attribute))]
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny1604 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
# Device model for the ATtiny1604.
# Keys follow the "<memory>_<param>_<byte|bytes>" convention parsed by DeviceMemoryInfo;
# all addresses and sizes here are byte-oriented.
DEVICE_INFO = {
    'name': 'attiny1604',
    'architecture': 'avr8x',

    # eeprom
    'eeprom_address_byte': 0x00001400,
    'eeprom_size_bytes': 0x0100,
    'eeprom_page_size_bytes': 0x20,
    'eeprom_read_size_bytes': 1,
    'eeprom_write_size_bytes': 1,
    'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
    'eeprom_isolated_erase': True,

    # fuses
    'fuses_address_byte': 0x00001280,
    'fuses_size_bytes': 0xA,
    'fuses_page_size_bytes': 1,
    'fuses_read_size_bytes': 1,
    'fuses_write_size_bytes': 1,
    'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
    'fuses_isolated_erase': False,

    # internal_sram
    'internal_sram_address_byte': 0x3c00,
    'internal_sram_size_bytes': 0x0400,
    'internal_sram_page_size_bytes': 1,
    'internal_sram_read_size_bytes': 1,
    'internal_sram_write_size_bytes': 1,
    'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
    'internal_sram_isolated_erase': False,

    # lockbits
    'lockbits_address_byte': 0x0000128A,
    'lockbits_size_bytes': 0x1,
    'lockbits_page_size_bytes': 1,
    'lockbits_read_size_bytes': 1,
    'lockbits_write_size_bytes': 1,
    'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
    'lockbits_isolated_erase': False,

    # signatures
    'signatures_address_byte': 0x00001100,
    'signatures_size_bytes': 0x3,
    'signatures_page_size_bytes': 0x40,
    'signatures_read_size_bytes': 1,
    # write size 0: signatures are read-only
    'signatures_write_size_bytes': 0,
    'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
    'signatures_isolated_erase': False,

    # user_row
    'user_row_address_byte': 0x00001300,
    'user_row_size_bytes': 0x20,
    'user_row_page_size_bytes': 0x20,
    'user_row_read_size_bytes': 1,
    'user_row_write_size_bytes': 1,
    'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
    'user_row_isolated_erase': True,

    # flash
    'flash_address_byte': 0x00008000,
    'flash_size_bytes': 0x4000,
    'flash_page_size_bytes': 0x40,
    'flash_read_size_bytes': 2,
    'flash_write_size_bytes': 0x40,
    'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
    'flash_isolated_erase': True,

    # Some extra AVR specific fields
    'nvmctrl_base': 0x00001000,
    'syscfg_base': 0x00000F00,
    'ocd_base': 0x00000F80,
    'prog_clock_khz': 900,
    'interface': 'UPDI',
    'address_size': '16-bit',
    'device_id': 0x1E9425,
}
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny1606 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
# Device model for the ATtiny1606.
# Keys follow the "<memory>_<param>_<byte|bytes>" convention parsed by DeviceMemoryInfo;
# all addresses and sizes here are byte-oriented.
DEVICE_INFO = {
    'name': 'attiny1606',
    'architecture': 'avr8x',

    # eeprom
    'eeprom_address_byte': 0x00001400,
    'eeprom_size_bytes': 0x0100,
    'eeprom_page_size_bytes': 0x20,
    'eeprom_read_size_bytes': 1,
    'eeprom_write_size_bytes': 1,
    'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
    'eeprom_isolated_erase': True,

    # fuses
    'fuses_address_byte': 0x00001280,
    'fuses_size_bytes': 0xA,
    'fuses_page_size_bytes': 1,
    'fuses_read_size_bytes': 1,
    'fuses_write_size_bytes': 1,
    'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
    'fuses_isolated_erase': False,

    # internal_sram
    'internal_sram_address_byte': 0x3c00,
    'internal_sram_size_bytes': 0x0400,
    'internal_sram_page_size_bytes': 1,
    'internal_sram_read_size_bytes': 1,
    'internal_sram_write_size_bytes': 1,
    'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
    'internal_sram_isolated_erase': False,

    # lockbits
    'lockbits_address_byte': 0x0000128A,
    'lockbits_size_bytes': 0x1,
    'lockbits_page_size_bytes': 1,
    'lockbits_read_size_bytes': 1,
    'lockbits_write_size_bytes': 1,
    'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
    'lockbits_isolated_erase': False,

    # signatures
    'signatures_address_byte': 0x00001100,
    'signatures_size_bytes': 0x3,
    'signatures_page_size_bytes': 0x40,
    'signatures_read_size_bytes': 1,
    # write size 0: signatures are read-only
    'signatures_write_size_bytes': 0,
    'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
    'signatures_isolated_erase': False,

    # user_row
    'user_row_address_byte': 0x00001300,
    'user_row_size_bytes': 0x20,
    'user_row_page_size_bytes': 0x20,
    'user_row_read_size_bytes': 1,
    'user_row_write_size_bytes': 1,
    'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
    'user_row_isolated_erase': True,

    # flash
    'flash_address_byte': 0x00008000,
    'flash_size_bytes': 0x4000,
    'flash_page_size_bytes': 0x40,
    'flash_read_size_bytes': 2,
    'flash_write_size_bytes': 0x40,
    'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
    'flash_isolated_erase': True,

    # Some extra AVR specific fields
    'nvmctrl_base': 0x00001000,
    'syscfg_base': 0x00000F00,
    'ocd_base': 0x00000F80,
    'prog_clock_khz': 900,
    'interface': 'UPDI',
    'address_size': '16-bit',
    'device_id': 0x1E9424,
}
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny1607 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny1607',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0100,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3c00,
|
||||
'internal_sram_size_bytes': 0x0400,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x4000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9423,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny1614 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny1614',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0100,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3800,
|
||||
'internal_sram_size_bytes': 0x0800,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x4000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9422,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny1616 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny1616',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0100,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3800,
|
||||
'internal_sram_size_bytes': 0x0800,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x4000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9421,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny1617 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny1617',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0100,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3800,
|
||||
'internal_sram_size_bytes': 0x0800,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x4000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9420,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny1624 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny1624',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0100,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3800,
|
||||
'internal_sram_size_bytes': 0x0800,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x4000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E942A,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny1626 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny1626',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0100,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3800,
|
||||
'internal_sram_size_bytes': 0x0800,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x4000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9429,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny1627 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny1627',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0100,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3800,
|
||||
'internal_sram_size_bytes': 0x0800,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x4000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9428,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny202 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny202',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0040,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3f80,
|
||||
'internal_sram_size_bytes': 0x0080,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x0800,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9123,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny204 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny204',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0040,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3f80,
|
||||
'internal_sram_size_bytes': 0x0080,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x0800,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9122,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny212 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny212',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0040,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3f80,
|
||||
'internal_sram_size_bytes': 0x0080,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x0800,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9121,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny214 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny214',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0040,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3f80,
|
||||
'internal_sram_size_bytes': 0x0080,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x0800,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9120,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny3216 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny3216',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0100,
|
||||
'eeprom_page_size_bytes': 0x40,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3800,
|
||||
'internal_sram_size_bytes': 0x0800,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x80,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x40,
|
||||
'user_row_page_size_bytes': 0x40,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x8000,
|
||||
'flash_page_size_bytes': 0x80,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x80,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9521,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny3217 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny3217',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0100,
|
||||
'eeprom_page_size_bytes': 0x40,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3800,
|
||||
'internal_sram_size_bytes': 0x0800,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x80,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x40,
|
||||
'user_row_page_size_bytes': 0x40,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x8000,
|
||||
'flash_page_size_bytes': 0x80,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x80,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9522,
|
||||
}
|
||||
@@ -0,0 +1,85 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny3224 devices
|
||||
The following data would normally have been collected from device packs.
|
||||
But since Microchip hasn't done this, it was deduced from device packs by Spence Konde.
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny3224',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0100,
|
||||
'eeprom_page_size_bytes': 0x40,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3400,
|
||||
'internal_sram_size_bytes': 0x0C00,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x80,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x40,
|
||||
'user_row_page_size_bytes': 0x40,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x8000,
|
||||
'flash_page_size_bytes': 0x80,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x80,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9528,
|
||||
}
|
||||
@@ -0,0 +1,85 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny3226 devices
|
||||
The following data would normally have been collected from device packs.
|
||||
But since Microchip hasn't done this, it was deduced from device packs by Spence Konde.
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny3226',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0100,
|
||||
'eeprom_page_size_bytes': 0x40,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3400,
|
||||
'internal_sram_size_bytes': 0x0C00,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x80,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x40,
|
||||
'user_row_page_size_bytes': 0x40,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x8000,
|
||||
'flash_page_size_bytes': 0x80,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x80,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9527,
|
||||
}
|
||||
@@ -0,0 +1,85 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny3227 devices
|
||||
The following data would normally have been collected from device packs.
|
||||
But since Microchip hasn't done this, it was deduced from device packs by Spence Konde.
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny3227',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0100,
|
||||
'eeprom_page_size_bytes': 0x40,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3400,
|
||||
'internal_sram_size_bytes': 0x0C00,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x80,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x40,
|
||||
'user_row_page_size_bytes': 0x40,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x8000,
|
||||
'flash_page_size_bytes': 0x80,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x80,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9526,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny402 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny402',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3f00,
|
||||
'internal_sram_size_bytes': 0x0100,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x1000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9227,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny404 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny404',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3f00,
|
||||
'internal_sram_size_bytes': 0x0100,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x1000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9226,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny406 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny406',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3f00,
|
||||
'internal_sram_size_bytes': 0x0100,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x1000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9225,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny412 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny412',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3f00,
|
||||
'internal_sram_size_bytes': 0x0100,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x1000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9223,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny414 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny414',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3f00,
|
||||
'internal_sram_size_bytes': 0x0100,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x1000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9222,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny416 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny416',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3f00,
|
||||
'internal_sram_size_bytes': 0x0100,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x1000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9221,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny417 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny417',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3f00,
|
||||
'internal_sram_size_bytes': 0x0100,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x1000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9220,
|
||||
}
|
||||
@@ -0,0 +1,86 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny424 devices
|
||||
The following data would normally have been collected from device packs.
|
||||
But since Microchip hasn't done this, and his users were complaining,
|
||||
it was deduced from device packs by Spence Konde.
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny424',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3e00,
|
||||
'internal_sram_size_bytes': 0x0200,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x1000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E922C,
|
||||
}
|
||||
@@ -0,0 +1,86 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny426 devices
|
||||
The following data would normally have been collected from device packs.
|
||||
But since Microchip hasn't done this, and his users were complaining,
|
||||
it was deduced from device packs by Spence Konde.
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny426',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3e00,
|
||||
'internal_sram_size_bytes': 0x0200,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x1000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E922B,
|
||||
}
|
||||
@@ -0,0 +1,86 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny427 devices
|
||||
The following data would normally have been collected from device packs.
|
||||
But since Microchip hasn't done this, and his users were complaining,
|
||||
it was deduced from device packs by Spence Konde.
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny427',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3e00,
|
||||
'internal_sram_size_bytes': 0x0200,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x1000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E922A,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny804 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny804',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3e00,
|
||||
'internal_sram_size_bytes': 0x0200,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x2000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9325,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny806 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny806',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3e00,
|
||||
'internal_sram_size_bytes': 0x0200,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x2000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9324,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny807 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny807',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3e00,
|
||||
'internal_sram_size_bytes': 0x0200,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x2000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9323,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny814 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny814',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3e00,
|
||||
'internal_sram_size_bytes': 0x0200,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x2000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9322,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny816 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny816',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3e00,
|
||||
'internal_sram_size_bytes': 0x0200,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x2000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9321,
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny817 devices
|
||||
The following data was collected from device pack Microchip.ATtiny_DFP 2.4.111
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny817',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3e00,
|
||||
'internal_sram_size_bytes': 0x0200,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x2000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9320,
|
||||
}
|
||||
@@ -0,0 +1,86 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny824 devices
|
||||
The following data would normally have been collected from device packs.
|
||||
But since Microchip hasn't done this, and his users were complaining,
|
||||
it was deduced from device packs by Spence Konde.
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny824',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3c00,
|
||||
'internal_sram_size_bytes': 0x0400,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x2000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9329,
|
||||
}
|
||||
@@ -0,0 +1,86 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny826 devices
|
||||
The following data would normally have been collected from device packs.
|
||||
But since Microchip hasn't done this, and his users were complaining,
|
||||
it was deduced from device packs by Spence Konde.
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny826',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3c00,
|
||||
'internal_sram_size_bytes': 0x0400,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x2000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9328,
|
||||
}
|
||||
@@ -0,0 +1,86 @@
|
||||
|
||||
"""
|
||||
Required device info for the attiny827 devices
|
||||
The following data would normally have been collected from device packs.
|
||||
But since Microchip hasn't done this, and his users were complaining,
|
||||
it was deduced from device packs by Spence Konde.
|
||||
"""
|
||||
|
||||
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
|
||||
|
||||
DEVICE_INFO = {
|
||||
'name': 'attiny827',
|
||||
'architecture': 'avr8x',
|
||||
|
||||
# eeprom
|
||||
'eeprom_address_byte': 0x00001400,
|
||||
'eeprom_size_bytes': 0x0080,
|
||||
'eeprom_page_size_bytes': 0x20,
|
||||
'eeprom_read_size_bytes': 1,
|
||||
'eeprom_write_size_bytes': 1,
|
||||
'eeprom_chiperase_effect': ChiperaseEffect.CONDITIONALLY_ERASED_AVR,
|
||||
'eeprom_isolated_erase': True,
|
||||
|
||||
# fuses
|
||||
'fuses_address_byte': 0x00001280,
|
||||
'fuses_size_bytes': 0xA,
|
||||
'fuses_page_size_bytes': 1,
|
||||
'fuses_read_size_bytes': 1,
|
||||
'fuses_write_size_bytes': 1,
|
||||
'fuses_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'fuses_isolated_erase': False,
|
||||
|
||||
# internal_sram
|
||||
'internal_sram_address_byte': 0x3c00,
|
||||
'internal_sram_size_bytes': 0x0400,
|
||||
'internal_sram_page_size_bytes': 1,
|
||||
'internal_sram_read_size_bytes': 1,
|
||||
'internal_sram_write_size_bytes': 1,
|
||||
'internal_sram_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'internal_sram_isolated_erase': False,
|
||||
|
||||
# lockbits
|
||||
'lockbits_address_byte': 0x0000128A,
|
||||
'lockbits_size_bytes': 0x1,
|
||||
'lockbits_page_size_bytes': 1,
|
||||
'lockbits_read_size_bytes': 1,
|
||||
'lockbits_write_size_bytes': 1,
|
||||
'lockbits_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'lockbits_isolated_erase': False,
|
||||
|
||||
# signatures
|
||||
'signatures_address_byte': 0x00001100,
|
||||
'signatures_size_bytes': 0x3,
|
||||
'signatures_page_size_bytes': 0x40,
|
||||
'signatures_read_size_bytes': 1,
|
||||
'signatures_write_size_bytes': 0,
|
||||
'signatures_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'signatures_isolated_erase': False,
|
||||
|
||||
# user_row
|
||||
'user_row_address_byte': 0x00001300,
|
||||
'user_row_size_bytes': 0x20,
|
||||
'user_row_page_size_bytes': 0x20,
|
||||
'user_row_read_size_bytes': 1,
|
||||
'user_row_write_size_bytes': 1,
|
||||
'user_row_chiperase_effect': ChiperaseEffect.NOT_ERASED,
|
||||
'user_row_isolated_erase': True,
|
||||
|
||||
# flash
|
||||
'flash_address_byte': 0x00008000,
|
||||
'flash_size_bytes': 0x2000,
|
||||
'flash_page_size_bytes': 0x40,
|
||||
'flash_read_size_bytes': 2,
|
||||
'flash_write_size_bytes': 0x40,
|
||||
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
|
||||
'flash_isolated_erase': True,
|
||||
|
||||
# Some extra AVR specific fields
|
||||
'nvmctrl_base': 0x00001000,
|
||||
'syscfg_base': 0x00000F00,
|
||||
'ocd_base': 0x00000F80,
|
||||
'prog_clock_khz': 900,
|
||||
'interface': 'UPDI',
|
||||
'address_size': '16-bit',
|
||||
'device_id': 0x1E9327,
|
||||
}
|
||||
@@ -0,0 +1,25 @@
|
||||
"""
|
||||
Definitions of erase related flags for the device models
|
||||
"""
|
||||
import inspect
|
||||
|
||||
from pymcuprog.utils import enum
|
||||
|
||||
# Flag used to specify if a memory type will be erased by a chip erase (AVR) or the widest/default bulk erase (PIC)
|
||||
ChiperaseEffect = enum(
|
||||
ALWAYS_ERASED='always erased',
|
||||
CONDITIONALLY_ERASED_AVR='conditionally erased (depending upon EESAVE fuse setting)',
|
||||
CONDITIONALLY_ERASED_PIC='conditionally erased (depending upon Code Protect configuration bit(s) settings)',
|
||||
NOT_ERASED='not erased')
|
||||
|
||||
def get_list_of_chiperase_effects():
|
||||
"""Return a list of all ChiperaseEffect values"""
|
||||
chiperase_effect_attributes = inspect.getmembers(ChiperaseEffect, lambda a: not inspect.isroutine(a))
|
||||
chiperase_effect_values = []
|
||||
for attribute in chiperase_effect_attributes:
|
||||
# Builtin routines always starts and ends with double underscore (__)
|
||||
if not (attribute[0].startswith('__') and attribute[0].endswith('__')):
|
||||
# Only the attribute values are returned
|
||||
chiperase_effect_values.append(attribute[1])
|
||||
|
||||
return chiperase_effect_values
|
||||
@@ -0,0 +1,41 @@
|
||||
#pylint: disable=too-few-public-methods
|
||||
"""
|
||||
Memory name definitions
|
||||
"""
|
||||
|
||||
class MemoryNameAliases(object):
|
||||
"""
|
||||
Memory names that are actually not real memories but an alias for several memories
|
||||
"""
|
||||
ALL = 'all'
|
||||
|
||||
class MemoryNames(object):
|
||||
"""
|
||||
Memory names corresponding to target device memories
|
||||
"""
|
||||
# Real memories
|
||||
FLASH = 'flash'
|
||||
CONFIG_WORD = 'config_words'
|
||||
USER_ID = 'user_id'
|
||||
USER_ROW = 'user_row'
|
||||
EEPROM = 'eeprom'
|
||||
FUSES = 'fuses'
|
||||
CALIBRATION_ROW = 'calibration_row'
|
||||
ICD = 'icd'
|
||||
LOCKBITS = 'lockbits'
|
||||
SIGNATURES = 'signatures'
|
||||
INTERNAL_SRAM = 'internal_sram'
|
||||
|
||||
@classmethod
|
||||
def get_all(cls):
|
||||
"""
|
||||
Get a list of all memories representing actual device memories
|
||||
|
||||
:return List of all memory names representing actual device memories
|
||||
"""
|
||||
all_memories = []
|
||||
for attribute in dir(cls):
|
||||
if not attribute.startswith('__') and not callable(getattr(cls, attribute)):
|
||||
all_memories.append(getattr(cls, attribute))
|
||||
|
||||
return all_memories
|
||||
181
software/tools/pymcuprog/libs/pymcuprog/hexfileutils.py
Normal file
181
software/tools/pymcuprog/libs/pymcuprog/hexfileutils.py
Normal file
@@ -0,0 +1,181 @@
|
||||
"""
|
||||
Module providing read and write functionality towards hex files with data intended for target device memories
|
||||
"""
|
||||
import copy
|
||||
import os
|
||||
from array import array
|
||||
from collections import namedtuple
|
||||
from intelhex import IntelHex
|
||||
try:
|
||||
from pathlib import Path
|
||||
except ImportError:
|
||||
from pathlib2 import Path # python 2 backport
|
||||
|
||||
from .deviceinfo.deviceinfokeys import DeviceMemoryInfoKeys, DeviceInfoKeys
|
||||
|
||||
def write_memories_to_hex(filename, memory_segments):
|
||||
"""
|
||||
Write a collection of memory segments to a hex file
|
||||
|
||||
Each segment will be written from relative offset 0 (i.e. start of each memory segment)
|
||||
:param filename: Name/path of hex file to write to
|
||||
:param memory_segments: list of namedtuples with two fields: data and memory_info. data contains a
|
||||
byte array of raw data bytes and memory_info is a dictionary with memory information as defined
|
||||
in deviceinfo.deviceinfo.DeviceMemoryInfo.
|
||||
"""
|
||||
hexfile = IntelHex()
|
||||
|
||||
for memory_segment in memory_segments:
|
||||
_add_data_to_hex(hexfile, memory_segment.data, memory_segment.memory_info)
|
||||
|
||||
_write_hex_to_file(hexfile, filename)
|
||||
|
||||
def write_memory_to_hex(filename, memory_segment, offset):
|
||||
"""
|
||||
Write one memory segment to a hex file with data starting at relative offset given by offset parameter.
|
||||
|
||||
:param filename: Name/path of hex file to write to
|
||||
:param memory_segment: namedtuple with two fields: data and memory_info. data contains a byte array
|
||||
of raw data bytes and memory_info is a dictionary with memory information as defined in
|
||||
deviceinfo.deviceinfo.DeviceMemoryInfo).
|
||||
:param offset: Relative offset for the data within the memory segment
|
||||
"""
|
||||
hexfile = IntelHex()
|
||||
|
||||
_add_data_to_hex(hexfile, memory_segment.data, memory_segment.memory_info, offset)
|
||||
|
||||
_write_hex_to_file(hexfile, filename)
|
||||
|
||||
def read_memories_from_hex(filename, device_memory_info):
|
||||
"""
|
||||
Read the content of a hexfile
|
||||
|
||||
:param filename: Name/path of hex file to read from
|
||||
:param device_memory_info: DeviceMemoryInfo instance for the device the hex file is intended for
|
||||
:returns: list of namedtuples with three fields: data, offset and memory_info. data contains a byte array
|
||||
of raw data bytes, offset is the start address within the memory the data starts at and memory_info
|
||||
is a dictionary with the memory info as defined in pymcuprog.deviceinfo.deviceinfo
|
||||
"""
|
||||
hexfile = IntelHex()
|
||||
hexfile.fromfile(filename, format='hex')
|
||||
|
||||
memory_segments = []
|
||||
for segment in hexfile.segments():
|
||||
start = segment[0]
|
||||
stop = segment[1]
|
||||
|
||||
subsegment_start = start
|
||||
subsegment_stop = start
|
||||
while subsegment_stop < stop:
|
||||
current_memory_info = device_memory_info.memory_info_by_address(subsegment_start,
|
||||
DeviceMemoryInfoKeys.HEXFILE_ADDRESS,
|
||||
DeviceMemoryInfoKeys.HEXFILE_SIZE)
|
||||
|
||||
if current_memory_info is None:
|
||||
raise IndexError(
|
||||
"Hexfile contains data at hex address 0x{:X} which is outside any memory".format(subsegment_start))
|
||||
|
||||
current_hexfile_address = current_memory_info[DeviceMemoryInfoKeys.HEXFILE_ADDRESS]
|
||||
current_hexfile_size = current_memory_info[DeviceMemoryInfoKeys.HEXFILE_SIZE]
|
||||
subsegment_stop = current_hexfile_address + current_hexfile_size
|
||||
if stop < subsegment_stop:
|
||||
# Reached end of segment
|
||||
subsegment_stop = stop
|
||||
memory_tuple = namedtuple('MemorySegment', 'data offset memory_info')
|
||||
|
||||
data = hexfile.tobinarray(start=subsegment_start, end=subsegment_stop - 1)
|
||||
current_size = current_memory_info[DeviceMemoryInfoKeys.SIZE]
|
||||
if current_hexfile_size == current_size*2:
|
||||
# There are phantom bytes in the hexfile (PIC16 EEPROM), so every 2nd byte should be removed
|
||||
data = remove_phantom_bytes(data)
|
||||
|
||||
memory_tuple.data = data
|
||||
memory_tuple.memory_info = current_memory_info
|
||||
memory_tuple.offset = subsegment_start - current_hexfile_address
|
||||
|
||||
memory_segments.append(copy.deepcopy(memory_tuple))
|
||||
|
||||
subsegment_start = subsegment_stop
|
||||
|
||||
return memory_segments
|
||||
|
||||
|
||||
def verify_flash_from_hex(hex_filename, backend, max_read_chunk=None):
|
||||
"""
|
||||
Verify the contents of flash against a hex-file
|
||||
|
||||
:param filename: Name/path of hex-file to verify
|
||||
:param device_memory_info: DeviceMemoryInfo instance for the device the hex file should be verified against
|
||||
:param backend: Reference to the Backend class of pymcuprog
|
||||
:returns: Boolean value indicating success or failure of the operation
|
||||
"""
|
||||
hexfile = IntelHex(hex_filename)
|
||||
segments = hexfile.segments()
|
||||
|
||||
for i in range(len(segments)):
|
||||
segment_data = []
|
||||
for j in range(segments[i][1]-segments[i][0]):
|
||||
segment_data.append(hexfile[segments[i][0]+j])
|
||||
|
||||
verify_status = backend.verify_memory(segment_data, 'flash', segments[i][0], max_read_chunk=max_read_chunk)
|
||||
if verify_status is False:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def remove_phantom_bytes(data):
|
||||
"""
|
||||
Remove every 2nd byte from the data
|
||||
"""
|
||||
data_stripped = []
|
||||
for index in range(0, len(data), 2):
|
||||
data_stripped.append(data[index])
|
||||
# Make a bin array out of the data list to be consistent with the data format of
|
||||
# the data fetched directly from the hex file
|
||||
data_stripped_binarray = array('B')
|
||||
data_stripped_binarray.fromlist(data_stripped)
|
||||
return data_stripped_binarray
|
||||
|
||||
def _add_data_to_hex(intelhex, data, memory_info, offset=0):
|
||||
"""
|
||||
Add given data starting at relative index offset to IntelHex instance intelhex
|
||||
|
||||
:param intelhex: IntelHex object
|
||||
:param data: raw data bytes
|
||||
:param memory_info: memory info as provided by pymcuprog.deviceinfo.deviceinfo
|
||||
:param offset: relative offset within the memory
|
||||
"""
|
||||
hexfile_address_key = DeviceMemoryInfoKeys.HEXFILE_ADDRESS
|
||||
hexfile_size_key = DeviceMemoryInfoKeys.HEXFILE_SIZE
|
||||
size_key = DeviceMemoryInfoKeys.SIZE
|
||||
name = memory_info[DeviceInfoKeys.NAME]
|
||||
|
||||
if offset+len(data) > memory_info[hexfile_size_key]:
|
||||
raise IndexError(
|
||||
"Attempting to write outside boundary of {} memory ({} bytes starting at offset {})".format(name,
|
||||
len(data),
|
||||
offset))
|
||||
|
||||
hex_offset = memory_info[hexfile_address_key] + offset
|
||||
if memory_info[hexfile_size_key] == memory_info[size_key]*2:
|
||||
# Hex file should contain one phantom byte per data byte in the hex file (PIC16 EEPROM)
|
||||
for i, dat in enumerate(data):
|
||||
intelhex[i*2 + hex_offset] = data[i]
|
||||
intelhex[i*2 + 1 + hex_offset] = 0 & 0xFF
|
||||
else:
|
||||
for i, dat in enumerate(data):
|
||||
intelhex[i + hex_offset] = dat
|
||||
|
||||
def _write_hex_to_file(intelhex, filename):
|
||||
"""
|
||||
Write intelhex object to file.
|
||||
|
||||
Directories will be created if path does not exist
|
||||
:param intelhex: IntelHex instance
|
||||
:param filename: Name/path to write intelhex object to
|
||||
"""
|
||||
directory = os.path.dirname(filename)
|
||||
if directory != '' and not os.path.exists(directory):
|
||||
Path(directory).mkdir(exist_ok=True, parents=True)
|
||||
|
||||
intelhex.write_hex_file(filename)
|
||||
57
software/tools/pymcuprog/libs/pymcuprog/logging.yaml
Normal file
57
software/tools/pymcuprog/libs/pymcuprog/logging.yaml
Normal file
@@ -0,0 +1,57 @@
|
||||
version: 1
|
||||
disable_existing_loggers: False
|
||||
formatters:
|
||||
timestamped:
|
||||
format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
|
||||
detailed:
|
||||
format: "%(name)s - %(levelname)s - %(message)s"
|
||||
simple:
|
||||
format: "%(message)s"
|
||||
|
||||
handlers:
|
||||
# Logging to the console is default to WARNING with detailed output:
|
||||
console:
|
||||
class: logging.StreamHandler
|
||||
level: WARNING
|
||||
formatter: detailed
|
||||
stream: ext://sys.stdout
|
||||
|
||||
# Logging debug output to file
|
||||
# Handler disabled by default - for reference only
|
||||
debug_file_handler:
|
||||
class: logging.FileHandler
|
||||
level: DEBUG
|
||||
formatter: timestamped
|
||||
# File path will be user log directory for this application
|
||||
filename: debug.log
|
||||
encoding: utf8
|
||||
|
||||
# Logging errors to file
|
||||
# Handler disabled by default - for reference only
|
||||
error_file_handler:
|
||||
class: logging.handlers.RotatingFileHandler
|
||||
level: ERROR
|
||||
formatter: timestamped
|
||||
# File path will be user log directory for this application
|
||||
filename: errors.log
|
||||
maxBytes: 10485760 # 10MB
|
||||
backupCount: 20
|
||||
encoding: utf8
|
||||
|
||||
loggers:
|
||||
# pyedbglib library should be kept to critical errors to console only
|
||||
pyedbglib:
|
||||
level: ERROR
|
||||
handlers: [console]
|
||||
propagate: no
|
||||
|
||||
root:
|
||||
# Default level is warning
|
||||
# this is increased with -v <level> in CLI usage
|
||||
level: WARNING
|
||||
# Default handlers is console only
|
||||
handlers: [console]
|
||||
# Add debug_file_handler for debug output to file
|
||||
# Add error_file_handler for error output to file
|
||||
# See configuration in handlers section above
|
||||
#handlers: [console, debug_file_handler, error_file_handler]
|
||||
130
software/tools/pymcuprog/libs/pymcuprog/nvm.py
Normal file
130
software/tools/pymcuprog/libs/pymcuprog/nvm.py
Normal file
@@ -0,0 +1,130 @@
|
||||
"""
|
||||
NVM layer protocols
|
||||
"""
|
||||
# Python 3 compatibility for Python 2
|
||||
from __future__ import print_function
|
||||
from logging import getLogger
|
||||
|
||||
from .deviceinfo.deviceinfokeys import DeviceInfoKeys
|
||||
|
||||
def get_nvm_access_provider(transport, device_info, interface="", packpath=None, frequency=None, options=""):
|
||||
"""
|
||||
Returns an NVM provider with the requested properties
|
||||
|
||||
:param transport: transport layer object
|
||||
:param device_info: device info dict
|
||||
:param interface: physical interface for NVM
|
||||
:param packpath: path to pack
|
||||
:param frequency: interface clock
|
||||
:param options: special options
|
||||
:return: NVM access object
|
||||
"""
|
||||
# Although it is considered best practice to have imports at top level, in this case it makes sense to have the
|
||||
# imports on the function level as in most cases only one import will be used. Having all imports at the top
|
||||
# level will then be a waste of resources.
|
||||
#pylint: disable=import-outside-toplevel
|
||||
# There will be cyclic imports since the modules imported below containing NVM Access providers will import
|
||||
# from the current module since all NVM Access providers inherits from the NVM Access provider base classes
|
||||
# defined in the current module, but this should be ok since the imports below are late.
|
||||
#pylint: disable=cyclic-import
|
||||
accessprovider = None
|
||||
architecture = device_info[DeviceInfoKeys.ARCHITECTURE].lower()
|
||||
if DeviceInfoKeys.INTERFACE in device_info:
|
||||
interface = device_info[DeviceInfoKeys.INTERFACE].lower()
|
||||
|
||||
if architecture in ['pic16', 'pic18', 'pic24']:
|
||||
from .nvmpic import NvmAccessProviderCmsisDapPic
|
||||
accessprovider = NvmAccessProviderCmsisDapPic(transport, device_info, packpath, options=options)
|
||||
elif architecture == 'avr8x':
|
||||
if isinstance(transport, str):
|
||||
if interface == 'updi':
|
||||
from .nvmserialupdi import NvmAccessProviderSerial
|
||||
accessprovider = NvmAccessProviderSerial(transport, device_info, baud=frequency)
|
||||
elif interface == 'updi':
|
||||
from .nvmupdi import NvmAccessProviderCmsisDapUpdi
|
||||
accessprovider = NvmAccessProviderCmsisDapUpdi(transport, device_info=device_info,
|
||||
frequency=frequency, options=options)
|
||||
elif architecture == 'avr8':
|
||||
if interface == 'isp':
|
||||
if interface == "debugwire":
|
||||
from .nvmdebugwire import NvmAccessProviderCmsisDapDebugwire
|
||||
accessprovider = NvmAccessProviderCmsisDapDebugwire(transport, device_info)
|
||||
else:
|
||||
from .nvmspi import NvmAccessProviderCmsisDapSpi
|
||||
accessprovider = NvmAccessProviderCmsisDapSpi(transport, device_info)
|
||||
elif architecture == 'cortex-m0plus':
|
||||
from .nvmmzeroplus import NvmAccessProviderCmsisDapMZeroPlus
|
||||
accessprovider = NvmAccessProviderCmsisDapMZeroPlus(transport, device_info, frequency)
|
||||
elif architecture == 'avr32':
|
||||
from .nvmavr32 import NvmAccessProviderCmsisDapAvr32
|
||||
accessprovider = NvmAccessProviderCmsisDapAvr32(transport, device_info)
|
||||
|
||||
return accessprovider
|
||||
|
||||
class NvmAccessProvider:
|
||||
"""
|
||||
Wrapper for device info
|
||||
"""
|
||||
|
||||
def __init__(self, device_info):
|
||||
self.device_info = device_info
|
||||
self.logger = getLogger(__name__)
|
||||
|
||||
def _log_incomplete_stack(self, device_stack):
|
||||
"""
|
||||
Used to tell the user this device stack is not completed yet
|
||||
|
||||
:param device_stack: User friendly name of target stack
|
||||
"""
|
||||
self.logger.warning("")
|
||||
self.logger.warning("%s stack is in Alpha state", device_stack)
|
||||
self.logger.warning("Expect some features to be missing")
|
||||
self.logger.warning("")
|
||||
|
||||
def start(self, user_interaction_callback=None):
|
||||
"""
|
||||
Start (activate) session
|
||||
|
||||
:param user_interaction_callback: Callback to be called when user interaction is required,
|
||||
for example when doing UPDI high-voltage activation with user target power toggle.
|
||||
This function could ask the user to toggle power and halt execution waiting for the user
|
||||
to respond (this is default behavior if the callback is None), or if the user is another
|
||||
script it could toggle power automatically and then return.
|
||||
"""
|
||||
#pylint: disable=unused-argument
|
||||
self.logger.info("No specific initializer for this provider")
|
||||
|
||||
def stop(self):
|
||||
"""
|
||||
Stop (deactivate) session
|
||||
"""
|
||||
self.logger.info("No specific de-initializer for this provider")
|
||||
|
||||
def hold_in_reset(self):
|
||||
"""
|
||||
Hold target in reset
|
||||
"""
|
||||
self.logger.info("hold_in_reset not implemented for this provider")
|
||||
|
||||
def release_from_reset(self):
|
||||
"""
|
||||
Release target from reset
|
||||
"""
|
||||
self.logger.info("release_from_reset not implemented for this provider")
|
||||
|
||||
class NvmAccessProviderCmsisDapTool(NvmAccessProvider):
|
||||
"""
|
||||
General CMSIS-DAP Tool
|
||||
"""
|
||||
|
||||
def __init__(self, device_info):
|
||||
NvmAccessProvider.__init__(self, device_info)
|
||||
|
||||
|
||||
class NvmAccessProviderCmsisDapAvr(NvmAccessProviderCmsisDapTool):
|
||||
"""
|
||||
AVR CMSIS DAP Tool
|
||||
"""
|
||||
|
||||
def __init__(self, device_info):
|
||||
NvmAccessProviderCmsisDapTool.__init__(self, device_info)
|
||||
248
software/tools/pymcuprog/libs/pymcuprog/nvmserialupdi.py
Normal file
248
software/tools/pymcuprog/libs/pymcuprog/nvmserialupdi.py
Normal file
@@ -0,0 +1,248 @@
|
||||
"""
|
||||
pyupdi-esque NVM implementation
|
||||
"""
|
||||
import binascii
|
||||
|
||||
from pyedbglib.util import binary
|
||||
|
||||
from . import utils
|
||||
from .nvm import NvmAccessProvider
|
||||
from .deviceinfo import deviceinfo
|
||||
from .deviceinfo.deviceinfokeys import DeviceInfoKeysAvr, DeviceMemoryInfoKeys
|
||||
from .deviceinfo.memorynames import MemoryNames
|
||||
from .serialupdi.application import UpdiApplication
|
||||
|
||||
import math
|
||||
|
||||
from . import progress_bar
|
||||
|
||||
# This is a data class so it should not need any methods but will have many instance variables
|
||||
# pylint: disable=too-many-instance-attributes,too-few-public-methods
|
||||
class Dut:
|
||||
"""
|
||||
Create a device object for UpdiApplication
|
||||
"""
|
||||
|
||||
def __init__(self, dev_info):
|
||||
# Parse the device info for memory descriptions
|
||||
device_memory_info = deviceinfo.DeviceMemoryInfo(dev_info)
|
||||
|
||||
flash_info = device_memory_info.memory_info_by_name(MemoryNames.FLASH)
|
||||
self.flash_start = flash_info[DeviceMemoryInfoKeys.ADDRESS]
|
||||
self.flash_size = flash_info[DeviceMemoryInfoKeys.SIZE]
|
||||
self.flash_pagesize = flash_info[DeviceMemoryInfoKeys.PAGE_SIZE]
|
||||
self.syscfg_address = dev_info[DeviceInfoKeysAvr.SYSCFG_BASE]
|
||||
self.nvmctrl_address = dev_info[DeviceInfoKeysAvr.NVMCTRL_BASE]
|
||||
address_key = DeviceMemoryInfoKeys.ADDRESS
|
||||
self.sigrow_address = device_memory_info.memory_info_by_name(MemoryNames.SIGNATURES)[address_key]
|
||||
self.fuses_address = device_memory_info.memory_info_by_name(MemoryNames.FUSES)[address_key]
|
||||
self.userrow_address = device_memory_info.memory_info_by_name(MemoryNames.USER_ROW)[address_key]
|
||||
|
||||
|
||||
class NvmAccessProviderSerial(NvmAccessProvider):
|
||||
"""
|
||||
NVM Access the Python AVR way
|
||||
"""
|
||||
|
||||
def __init__(self, port, device_info, baud):
|
||||
self.avr = None
|
||||
NvmAccessProvider.__init__(self, device_info)
|
||||
if not baud:
|
||||
baud = 115200
|
||||
self.dut = Dut(device_info)
|
||||
self.avr = UpdiApplication(port, baud, self.dut)
|
||||
# Read the device info to set up the UPDI stack variant
|
||||
|
||||
self.avr.read_device_info()
|
||||
try:
|
||||
self.avr.enter_progmode()
|
||||
except IOError as inst:
|
||||
self.logger.error("Device is locked.\nError:\n%s", inst)
|
||||
|
||||
def read_device_id(self):
|
||||
"""
|
||||
Read and display (log) the device info
|
||||
|
||||
:returns: Device ID raw bytes (Little endian)
|
||||
"""
|
||||
self.avr.read_device_info()
|
||||
|
||||
signatures_base = self.dut.sigrow_address
|
||||
|
||||
# Read 3 bytes
|
||||
sig = self.avr.read_data(signatures_base, 3)
|
||||
device_id_read = binary.unpack_be24(sig)
|
||||
self.logger.info("Device ID: '%06X'", device_id_read)
|
||||
if not self.device_info.get(DeviceInfoKeysAvr.DEVICE_ID) == device_id_read:
|
||||
self.logger.warning("ID read ('%06X') does not match expected device id! ('%06X')", device_id_read,
|
||||
self.device_info.get(DeviceInfoKeysAvr.DEVICE_ID))
|
||||
raise ValueError("Device ID does not match")
|
||||
revision = self.avr.read_data(self.device_info.get(DeviceInfoKeysAvr.SYSCFG_BASE) + 1, 1)
|
||||
self.logger.info("Device revision: '%s'", chr(revision[0] + ord('A')))
|
||||
serial = self.avr.read_data(signatures_base + 3, 10)
|
||||
self.logger.info("Device serial number: '%s'", binascii.hexlify(serial))
|
||||
|
||||
# Return the raw signature bytes, but swap the endianness as target sends ID as Big endian
|
||||
return bytearray([sig[2], sig[1], sig[0]])
|
||||
|
||||
def erase(self, memory_info=None, address=None):
|
||||
"""
|
||||
Do a chip erase of the device
|
||||
"""
|
||||
_dummy = memory_info
|
||||
_dummy = address
|
||||
try:
|
||||
self.avr.nvm.chip_erase()
|
||||
except IOError as inst:
|
||||
self.logger.error("Device is locked. Performing unlock with chip erase.\nError: ('%s')", inst)
|
||||
self.avr.unlock()
|
||||
|
||||
def write(self, memory_info, offset, data, blocksize=0, pagewrite_delay=0):
|
||||
"""
|
||||
Write the memory with data
|
||||
|
||||
:param memory_info: dictionary for the memory as provided by the DeviceMemoryInfo class
|
||||
:param offset: relative offset within the memory type
|
||||
:param data: the data to program
|
||||
:return: None
|
||||
"""
|
||||
# Make sure the data is aligned to a memory page
|
||||
data_aligned, offset_aligned = utils.pagealign(data,
|
||||
offset,
|
||||
memory_info[DeviceMemoryInfoKeys.PAGE_SIZE],
|
||||
memory_info[DeviceMemoryInfoKeys.WRITE_SIZE])
|
||||
memtype_string = memory_info[DeviceMemoryInfoKeys.NAME]
|
||||
|
||||
offset_aligned += memory_info[DeviceMemoryInfoKeys.ADDRESS]
|
||||
|
||||
if memtype_string in (MemoryNames.FLASH, MemoryNames.EEPROM, MemoryNames.FUSES):
|
||||
write_chunk_size = memory_info[DeviceMemoryInfoKeys.PAGE_SIZE]
|
||||
else:
|
||||
write_chunk_size = len(data_aligned)
|
||||
|
||||
n_chunk = math.ceil(len(data_aligned)/write_chunk_size)
|
||||
bar = progress_bar.ProgressBar(n_chunk, hide=n_chunk == 1)
|
||||
while data_aligned:
|
||||
if len(data_aligned) < write_chunk_size:
|
||||
write_chunk_size = len(data_aligned)
|
||||
chunk = data_aligned[0:write_chunk_size]
|
||||
self.logger.debug("Writing %d bytes to address 0x%06X", write_chunk_size, offset_aligned)
|
||||
if memtype_string == MemoryNames.FUSES:
|
||||
self.avr.nvm.write_fuse(offset_aligned, chunk)
|
||||
elif memtype_string == MemoryNames.EEPROM:
|
||||
self.avr.nvm.write_eeprom(offset_aligned, chunk)
|
||||
else:
|
||||
# Spence Konde, 5/8/2021:
|
||||
# As far as I can tell, this is the only point where, we're writing a hex file, we know both the page size
|
||||
# AND are in the path of blocksize parameter. So - if its 0 or not given we should "do the old behavior", then
|
||||
# blocksize=2. The special value -1 tells us to have it write blocks equal to chunk/page size. Any other number
|
||||
# will be used as blocksize. Negative numbers beyond -1 were replaced with zero way at the beginning, as they would
|
||||
# result in crazy behavior and make everything fall over.
|
||||
# megaTinyCore and DxCore will always pass -1 as blocksize unless we find something where that doesn't work.
|
||||
#
|
||||
# Also, we are now finally in the section of the code specific to serialupdi. Up until we get here, 0 is the default
|
||||
# and if that's what we got, we omit it when making other calls, because there are almost certainly calls elsewhere
|
||||
# that. Now that we are here, the default value is 2 (ie, one word at a time) but that won'ty be something we see often.
|
||||
#
|
||||
# It strikes me that here is *ALSO* where we know whether we are on the first, a middle, or the last page. Say we
|
||||
# kept count of how many pages had been written already - if it was 0 and nChunk > 1, we would pass an argument that says
|
||||
# This is the first page we are writing, do all that stuff we need to do at the start of a bulk write.
|
||||
# if it was nChunk - 1, we would send a different value for that argumennt, saying it was the last one of a bulk write
|
||||
# so it should do the stuff to end the bulk write mode. And otherwise, it gets a third value that gets treated as
|
||||
# a signal to omit all of those. for the streamlined write protocol, which could improve performance by another 22-45%
|
||||
# If you agree, we should do that.
|
||||
# What we currently do is grossly inefficient, because (due to the penalty for small packets) we spend half of our time
|
||||
# for every page: Setting the address pointer (only need to do this at the beginning - when reading second and subsequent pages
|
||||
# the previous writes left the pointer at exactly the location we then set it to.). Setting NVM cmd to FLWR - only needs to be done
|
||||
# at the start of a bulk write, assuming we also stop setting NVM command to NOOP after every page. Setting RSD - if we
|
||||
# do all I'm talking about here, we can set it at start of bulk write. And we can juyst check for for NVM errors before
|
||||
# the first and after the last page, not before and after every page. My models suggest this should improve performance
|
||||
# by 22% at 115200 baud, and 44% and 345600 baud (which is 1.5x 230400 baud - and happens to be about the fastest you can
|
||||
# do a bulk write that is consistent with the datasheet flash write time spec.
|
||||
#
|
||||
# See also my comment below in read() - these two places are where we can achieve the last noticable performance leaps.
|
||||
# -Spence
|
||||
bulk = 1
|
||||
if n_chunk == 1:
|
||||
#if omly one chunk, it is NOT a bulk write.
|
||||
bulk = 0
|
||||
elif len(data_aligned) <= write_chunk_size:
|
||||
# We are on the last page of a bulk write
|
||||
bulk = 2
|
||||
if blocksize == 0:
|
||||
self.avr.nvm.write_flash(offset_aligned, chunk, pagewrite_delay=pagewrite_delay)
|
||||
else:
|
||||
self.avr.nvm.write_flash(offset_aligned, chunk, blocksize=blocksize, bulkwrite = bulk, pagewrite_delay=pagewrite_delay)
|
||||
offset_aligned += write_chunk_size
|
||||
data_aligned = data_aligned[write_chunk_size:]
|
||||
bar.step()
|
||||
|
||||
def read(self, memory_info, offset, numbytes, max_read_chunk=None):
    """
    Read the memory in chunks

    :param memory_info: dictionary for the memory as provided by the DeviceMemoryInfo class
    :param offset: relative offset in the memory type
    :param numbytes: number of bytes to read
    :param max_read_chunk: maximum bytes per read transaction (<=256); None selects the default
    :return: array of bytes read
    """
    # Translate the relative offset into an absolute address
    offset += memory_info[DeviceMemoryInfoKeys.ADDRESS]

    # if reading from flash, we want to read words if it would reduce number of USB serial transactions.
    # this function is called for everything though, so be careful not to use it for memories read one byte at a time, like fuses
    data = []

    if max_read_chunk is None:
        read_chunk_size = 0x100
    else:
        read_chunk_size = max_read_chunk

    use_word_access = False
    memtype_string = memory_info[DeviceMemoryInfoKeys.NAME]
    # BUGFIX: was "memtype_string in (MemoryNames.FLASH)" - the parentheses do not
    # create a tuple, so that performed a substring test against the flash name
    # string.  An exact comparison is what was intended.
    if memtype_string == MemoryNames.FLASH:
        if numbytes > 0x100 and max_read_chunk is None:
            use_word_access = True
            read_chunk_size = 0x200

    # SACRIFICES SPEED FOR COMPATIBILITY - above line should happen whenever --limitreadsize=1 command line parameter is not passed, so we can only turn it on for specific tools -> programmer options that have this weird limitation. I couldn't propagate it through this mess!
    n_chunk = math.ceil(numbytes/read_chunk_size)
    bar = progress_bar.ProgressBar(n_chunk, hide=n_chunk == 1)

    while numbytes:
        if numbytes < read_chunk_size:
            # Final, possibly partial, chunk
            read_chunk_size = numbytes
        self.logger.debug("Reading %d bytes from address 0x%06X", read_chunk_size, offset)
        if use_word_access:
            # Word access halves the number of transactions; count is in words
            data += self.avr.read_data_words(offset, read_chunk_size >> 1)
        else:
            data += self.avr.read_data(offset, read_chunk_size)
        offset += read_chunk_size
        numbytes -= read_chunk_size
        bar.step()

    return data
|
||||
|
||||
def hold_in_reset(self):
    """
    Hold device in reset

    Deliberately a no-op on UPDI targets; see the comments below.
    """
    # For UPDI parts it is sufficient to enter programming mode to hold the target in reset
    # Since the start function is a prerequisite to all functions in this file it can be
    # assumed that programming mode already has been entered
    return
|
||||
|
||||
def release_from_reset(self):
    """
    Release device from reset

    Leaves programming mode, which lets the target run.
    """
    # Entering programming mode on UPDI parts will hold the device in reset. So to release
    # the reset the programming mode must be left.
    self.avr.leave_progmode()
|
||||
|
||||
def stop(self):
    """
    Stop the debugging session

    Leaves programming mode if a session was ever started; safe to call otherwise.
    """
    if self.avr is None:
        # No session was started - nothing to tear down
        return
    self.avr.leave_progmode()
|
||||
314
software/tools/pymcuprog/libs/pymcuprog/programmer.py
Normal file
314
software/tools/pymcuprog/libs/pymcuprog/programmer.py
Normal file
@@ -0,0 +1,314 @@
|
||||
"""
|
||||
Python MCU programmer
|
||||
"""
|
||||
import copy
|
||||
from logging import getLogger
|
||||
from collections import namedtuple
|
||||
|
||||
# Device data
|
||||
from .deviceinfo import deviceinfo
|
||||
|
||||
from . import utils
|
||||
from .pymcuprog_errors import PymcuprogNotSupportedError, PymcuprogSessionConfigError
|
||||
from .pymcuprog_errors import PymcuprogError
|
||||
from .nvm import get_nvm_access_provider
|
||||
from .deviceinfo.memorynames import MemoryNameAliases
|
||||
from .deviceinfo.deviceinfokeys import DeviceInfoKeysPic, DeviceMemoryInfoKeys
|
||||
|
||||
DEFAULT_BULK_ERASE_ADDRESS_KEY = DeviceInfoKeysPic.DEFAULT_BULK_ERASE_ADDRESS
|
||||
|
||||
class Programmer:
    """
    Main programmer class.

    Typical call sequence: load_device() -> setup_device() -> start() ->
    read/write/erase/verify operations -> stop().
    """

    def __init__(self, transport):
        """
        :param transport: transport object used to reach the debugger/target
        """
        # Hook onto logger
        self.logger = getLogger(__name__)
        # Use transport passed in
        self.transport = transport
        # Clear device model and mem info objects
        self.device_info = None
        self.device_model = None
        self.device_memory_info = None
        self.options = {}

    def set_options(self, options):
        """
        Stores options

        :param options: options to store
        """
        self.options = options

    def load_device(self, device_name):
        """
        Loads the device from the device folder

        :param device_name: name of the device to load
        :raises: PymcuprogNotSupportedError if device is not supported
        """
        # Try to instantiate device info. This will check if there is device support at all
        try:
            self.logger.info("Setting up programming session for '%s'", device_name)
            self.device_info = deviceinfo.getdeviceinfo(device_name)
        except ImportError as err:
            raise PymcuprogNotSupportedError("Unable to find device info: {}".format(err))

        # Now build a memory model for this device
        self.device_memory_info = deviceinfo.DeviceMemoryInfo(self.device_info)

    def setup_device(self, interface=None, packpath=None, clk=None):
        """
        Sets up a programming session with the loaded device

        :param interface: interface to use
        :param packpath: path to packs to use (for PIC)
        :param clk: clock frequency

        :raises: PymcuprogError if no device has been loaded first
        :raises: PymcuprogSessionConfigError if no NVM access provider could be set up
        :raises SerialException if unable to connect to serial port (if using serial port instead of physical debugger)
        """
        # Device must be loaded first
        if self.device_info is None:
            raise PymcuprogError("Device must be loaded before setup!")

        # Find a NVM provider that matches the device and transport
        try:
            self.device_model = get_nvm_access_provider(self.transport,
                                                        self.device_info,
                                                        interface=interface,
                                                        packpath=packpath,
                                                        frequency=clk,
                                                        options=self.options)
        except ImportError:
            raise PymcuprogSessionConfigError(
                "Unable to setup stack using the given packpath: '{0:s}'".format(
                    packpath or "None"))
        if self.device_model is None:
            raise PymcuprogSessionConfigError("Unable to setup stack, check session config parameters")

    def start(self, user_interaction_callback=None):
        """
        Starts the programming session with the device model

        :param user_interaction_callback: Callback to be called when user interaction is required,
            for example when doing UPDI high-voltage activation with user target power toggle.
            This function could ask the user to toggle power and halt execution waiting for the user
            to respond (this is default behavior if the callback is None), or if the user is another
            script it could toggle power automatically and then return.
        """
        self.device_model.start(user_interaction_callback=user_interaction_callback)

    def stop(self):
        """
        Stops the programming session with the device model
        """
        return self.device_model.stop()

    def get_device_model(self):
        """
        Exposes the device model in use to clients
        """
        return self.device_model

    def get_device_memory_info(self):
        """
        Exposes the device memory model to clients
        """
        return self.device_memory_info

    # Device model API functions

    def read_device_id(self):
        """
        Read the device ID

        :returns: Device ID raw bytes (Little endian)
        """
        self.logger.info("Reading device ID...")
        return self.device_model.read_device_id()

    def erase(self, memory_name, address):
        """
        Erase the device

        :param memory_name: memory region to erase as defined in deviceinfo.memorynames
            MemoryNameAliases.ALL will run the widest erase (e.g. chip erase on AVR or the widest bulk erase on PIC)
        :param address: address to erase
        """
        self.logger.info("Erase...")
        if memory_name == MemoryNameAliases.ALL:
            # Run default erase which is the widest erase
            memory_info = None
            if DEFAULT_BULK_ERASE_ADDRESS_KEY in self.device_info:
                address = self.device_info[DEFAULT_BULK_ERASE_ADDRESS_KEY]
            else:
                address = None
        else:
            memory_info = self.device_memory_info.memory_info_by_name(memory_name)
        self.device_model.erase(memory_info=memory_info, address=address)

    def write_memory(self, data, memory_name, offset=0, blocksize=0, pagewrite_delay=0):
        """
        Write memory on the device

        :param data: data to write
        :param memory_name: memory type to write
        :param offset: offset/address within that region to write
        :param blocksize: max bytes per write transaction (0 uses the NVM provider default)
        :param pagewrite_delay: extra delay (ms) after each page write
        :return: boolean status

        :raises: ValueError if trying to write outside the specified memory
        :raises: ValueError if the specified memory is not defined for the target device
        :raises: PymcuprogNotSupportedError if memory can't be written
        """
        self.logger.info("Write...")

        # Just some sanity checking of inputs
        if offset < 0:
            raise ValueError("Write offset can't be negative, requested offset: {}".format(offset))

        # Get information about the memory area
        memory = self.device_memory_info.memory_info_by_name(memory_name)
        size = memory[DeviceMemoryInfoKeys.SIZE]

        if memory[DeviceMemoryInfoKeys.WRITE_SIZE] == 0:
            raise PymcuprogNotSupportedError("{} memory can't be written".format(memory_name))

        if offset + len(data) > size:
            msg = "{} bytes of data at offset {} is outside the boundaries of '{}' with size {}".format(len(data),
                                                                                                        offset,
                                                                                                        memory_name,
                                                                                                        size)
            raise ValueError(msg)

        # Write the data to NVM
        self.logger.info("Writing %d bytes of data to %s...", len(data), memory[DeviceMemoryInfoKeys.NAME])
        if blocksize == 0:
            self.device_model.write(memory, offset, data, pagewrite_delay=pagewrite_delay)
        else:
            self.device_model.write(memory, offset, data, blocksize=blocksize, pagewrite_delay=pagewrite_delay)
        self.logger.info("Write complete.")
        return True

    def verify_memory(self, data, memory_name, offset=0, max_read_chunk=None):
        """
        Verify memory content

        :param data: data to verify against
        :param memory_name: memory type
        :param offset: offset/address within that memory region
        :param max_read_chunk: maximum bytes per read transaction (passed through to read_memory)
        :return: boolean compare status
        """
        # Get information about the memory area
        memory = self.device_memory_info.memory_info_by_name(memory_name)
        verify_mask = memory[DeviceMemoryInfoKeys.VERIFY_MASK]

        # Read back and compare the data to verify
        data_verify = self.read_memory(memory_name, offset, len(data), max_read_chunk=max_read_chunk)[0].data

        self.logger.info("Verifying...")
        try:
            # Use the compare util, which throws ValueError on mismatch
            utils.compare(data, data_verify, offset, verify_mask)
        except ValueError as error:
            self.logger.error("Verify failed: %s", str(error))
            return False
        return True

    def read_memory(self, memory_name, offset, numbytes=0, max_read_chunk=None):
        """
        Read device memory

        :param memory_name: memory type to read as defined in deviceinfo.memorynames
            MemoryNameAliases.ALL will read all memories defined in the device model for the configured
            device (numbytes and offset will be ignored)
        :param offset: offset/start address within the memory to start reading from
        :param numbytes: number of bytes to read. 0 means read all memory locations for given memory
            type (offset still applies)
        :param max_read_chunk: maximum bytes per read transaction
        :return: list of namedtuples with two fields: data and memory_info. data contains a byte array
            of raw data bytes and memory_info is a dictionary with memory information as defined in
            deviceinfo.deviceinfo.DeviceMemoryInfo. Normally the list will contain one item,
            but when memory_name parameter is MemoryNameAliases.ALL there will be one namedtuple
            item per memory type read.
        :raises: ValueError if trying to read outside the specified memory
        :raises: ValueError if the specified memory is not defined for the target device
        """
        # Just some sanity checking of inputs
        if offset < 0:
            raise ValueError("Read offset can't be negative, requested offset: {}".format(offset))
        if numbytes < 0:
            raise ValueError("Can't read negative number of bytes, requested numbytes: {}".format(numbytes))

        memories_read = []

        if memory_name == MemoryNameAliases.ALL:
            memories = list(self.device_memory_info.mem_by_name.keys())

            # When reading all memories offset is ignored
            offset = 0
            # ...and the same with numbytes
            numbytes = 0
        else:
            memories = [memory_name]

        # For each memory type there will be one namedtuple instance with raw data as a bytearray
        # and a dictionary with information about the memory.
        # BUGFIX: the previous implementation set attributes on the namedtuple *class* and
        # appended copy.deepcopy() of it - but deepcopy returns classes unchanged (they are
        # copied atomically), so every list entry aliased the same object and a multi-memory
        # read reported the last memory's data for all entries.  Instantiating the namedtuple
        # per memory makes each result independent.
        memory_read_tuple = namedtuple("Memory", 'data memory_info')

        for memory in memories:
            # Get information about the memory area
            meminfo = self.device_memory_info.memory_info_by_name(memory)

            # Align the read to a page boundary
            page_offset = offset % meminfo[DeviceMemoryInfoKeys.PAGE_SIZE]

            offset_adjusted = offset - page_offset
            numbytes_adjusted = numbytes
            # If number of bytes is not given, default to read the complete memory starting at the given offset
            if numbytes == 0:
                numbytes_adjusted = meminfo[DeviceMemoryInfoKeys.SIZE] - offset_adjusted
            else:
                numbytes_adjusted = numbytes_adjusted + page_offset

            # Read size correction
            read_size_key = DeviceMemoryInfoKeys.READ_SIZE
            if numbytes_adjusted % meminfo[read_size_key]:
                extra = meminfo[read_size_key] - numbytes_adjusted % meminfo[read_size_key]
                numbytes_adjusted += extra
            else:
                extra = 0

            if offset_adjusted + numbytes_adjusted > meminfo[DeviceMemoryInfoKeys.SIZE]:
                raise ValueError("{} bytes of data at offset {} is outside the boundaries of '{}' with size {}".format(
                    numbytes_adjusted, offset, meminfo[DeviceMemoryInfoKeys.NAME], meminfo[DeviceMemoryInfoKeys.SIZE]))

            # Read the data
            self.logger.info("Reading %d bytes from %s...", numbytes_adjusted, meminfo[DeviceMemoryInfoKeys.NAME])
            data = self.device_model.read(meminfo, offset_adjusted, numbytes_adjusted, max_read_chunk=max_read_chunk)

            # Strip the extra data that was read and store an independent result entry
            memories_read.append(memory_read_tuple(data=data[page_offset:numbytes_adjusted - extra],
                                                   memory_info=meminfo))

        return memories_read

    def hold_in_reset(self):
        """
        Hold the device in reset
        """
        self.logger.info("Hold in reset")
        self.device_model.hold_in_reset()

    def release_from_reset(self):
        """
        Release the device from reset (i.e. let the device run)
        """
        self.logger.info("Release from reset")
        self.device_model.release_from_reset()
|
||||
58
software/tools/pymcuprog/libs/pymcuprog/progress_bar.py
Normal file
58
software/tools/pymcuprog/libs/pymcuprog/progress_bar.py
Normal file
@@ -0,0 +1,58 @@
|
||||
import sys
|
||||
import time
|
||||
|
||||
|
||||
class ProgressBar:
    """
    Simple console progress bar.

    Renders a fixed-width bar on stdout and refreshes it as step() is called.
    With hide=True no output is produced, but the counters are still maintained.
    """

    def __init__(self, n_steps, width=50, hide=False):
        """
        :param n_steps: total number of steps the bar represents
        :param width: bar width in characters
        :param hide: suppress all console output when True
        """
        self.width = width
        self.n_steps = n_steps
        self.count_step = 0
        self.count_char = 0
        self.hide = hide
        self.print_start()

    def print_start(self):
        # Draw the empty bar outline
        if not self.hide:
            sys.stdout.write("[%s]" % (" " * self.width))
            sys.stdout.flush()

    def print_end(self):
        # Terminate the bar line
        if not self.hide:
            sys.stdout.write("\n")
            sys.stdout.flush()

    def update(self):
        # Redraw the bar with the current fill level and step counter
        n1 = self.count_char
        n2 = self.width - self.count_char
        if not self.hide:
            sys.stdout.write("\r[" + "=" * n1 + " " * n2 + "] {}/{}".format(self.count_step, self.n_steps))
            sys.stdout.flush()

    def step(self):
        """
        Advance the bar one step, redrawing only when the fill level changes.
        """
        self.count_step += 1
        # BUGFIX: guard against ZeroDivisionError when the bar was created with
        # n_steps == 0 (e.g. a zero-length transfer) and step() is still called.
        if self.n_steps <= 0:
            return
        count_char_new = self.width * self.count_step // self.n_steps

        if count_char_new > self.count_char:
            self.count_char = count_char_new
            self.update()

        if self.count_step == self.n_steps:
            self.print_end()
|
||||
|
||||
|
||||
def test():
    """Demo: animate a 100-step progress bar over roughly one second."""
    total_steps = 100

    print("Starting...")

    demo_bar = ProgressBar(total_steps)

    for _ in range(total_steps):
        time.sleep(0.01)
        demo_bar.step()

    print("done.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Run the progress bar demo when executed directly
    test()
|
||||
258
software/tools/pymcuprog/libs/pymcuprog/pymcuprog.py
Normal file
258
software/tools/pymcuprog/libs/pymcuprog/pymcuprog.py
Normal file
@@ -0,0 +1,258 @@
|
||||
"""
|
||||
Python MCU programmer Command Line Interface utility
|
||||
"""
|
||||
# Python 3 compatibility for Python 2
|
||||
from __future__ import print_function
|
||||
|
||||
# args, logging
|
||||
import sys
|
||||
import argparse
|
||||
import os
|
||||
import logging
|
||||
from logging.config import dictConfig
|
||||
import textwrap
|
||||
import yaml
|
||||
|
||||
try:
|
||||
from pathlib import Path
|
||||
except ImportError:
|
||||
from pathlib2 import Path # python 2 backport
|
||||
|
||||
from appdirs import user_log_dir
|
||||
from yaml.scanner import ScannerError
|
||||
|
||||
# pymcuprog main function
|
||||
from . import pymcuprog_main
|
||||
from .pymcuprog_main import WRITE_TO_HEX_MEMORIES
|
||||
from .deviceinfo.memorynames import MemoryNames, MemoryNameAliases
|
||||
|
||||
|
||||
def setup_logging(user_requested_level=logging.WARNING, default_path='logging.yaml',
                  env_key='MICROCHIP_PYTHONTOOLS_CONFIG'):
    """
    Setup logging configuration for pymcuprog CLI

    :param user_requested_level: console log level requested by the user (logging module constant)
    :param default_path: file name of the YAML logging configuration shipped with this package
    :param env_key: environment variable that may point to an alternative YAML config file

    Falls back to logging.basicConfig if the YAML config is missing or invalid.
    """
    # Logging config YAML file can be specified via environment variable
    value = os.getenv(env_key, None)
    if value:
        path = value
    else:
        # Otherwise use the one shipped with this application
        path = os.path.join(os.path.dirname(__file__), default_path)
    # Load the YAML if possible
    if os.path.exists(path):
        try:
            with open(path, 'rt') as file:
                # Load logging configfile from yaml
                configfile = yaml.safe_load(file)
                # File logging goes to user log directory under Microchip/modulename
                logdir = user_log_dir(__name__, "Microchip")
                # Look through all handlers, and prepend log directory to redirect all file loggers
                num_file_handlers = 0
                for handler in configfile['handlers'].keys():
                    # A filename key
                    if 'filename' in configfile['handlers'][handler].keys():
                        configfile['handlers'][handler]['filename'] = os.path.join(
                            logdir, configfile['handlers'][handler]['filename'])
                        num_file_handlers += 1
                # If file logging is enabled, it needs a folder
                if num_file_handlers > 0:
                    # Create it if it does not exist
                    Path(logdir).mkdir(exist_ok=True, parents=True)
                # Console logging takes granularity argument from CLI user
                configfile['handlers']['console']['level'] = user_requested_level
                # Root logger must be the most verbose of the ALL YAML configurations and the CLI user argument
                most_verbose_logging = min(user_requested_level, getattr(logging, configfile['root']['level']))
                for handler in configfile['handlers'].keys():
                    # A filename key
                    if 'filename' in configfile['handlers'][handler].keys():
                        level = getattr(logging, configfile['handlers'][handler]['level'])
                        most_verbose_logging = min(most_verbose_logging, level)
                configfile['root']['level'] = most_verbose_logging
            dictConfig(configfile)
            return
        except ScannerError:
            # Error while parsing YAML
            print("Error parsing logging config file '{}'".format(path))
        except KeyError as keyerror:
            # Error looking for custom fields in YAML
            print("Key {} not found in logging config file".format(keyerror))
    else:
        # Config specified by environment variable not found
        print("Unable to open logging config file '{}'".format(path))

    # If all else fails, revert to basic logging at specified level for this application
    print("Reverting to basic logging.")
    logging.basicConfig(level=user_requested_level)
|
||||
|
||||
|
||||
# Helper functions
|
||||
def _parse_literal(literal):
|
||||
"""
|
||||
Literals can either be integers or float values. Default is Integer
|
||||
"""
|
||||
try:
|
||||
return int(literal, 0)
|
||||
except ValueError:
|
||||
return float(literal)
|
||||
|
||||
|
||||
def main():
    """
    Entrypoint for installable CLI

    Configures the CLI and parses the arguments

    :return: exit status from pymcuprog_main.pymcuprog
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent('''\
    Generic programmer of selected AVR, PIC and SAM devices

    Basic actions:
        - ping: reads the device ID or signature
        - read: read NVM
        - write: write NVM
        - erase: erase NVM
            '''),
        epilog=textwrap.dedent('''\
    Usage examples:

        Ping a device on-board a kit:
        - pymcuprog.py ping

        Ping a device using Atmel-ICE
        - pymcuprog.py -t atmelice -d atmega4809 -i updi ping

        Read the some bytes of flash:
        - pymcuprog.py read -m flash -o 0x80 -b 64

        Erase an UPDI device:
        - pymcuprog.py erase

        Erase a locked UPDI device:
        - pymcuprog.py ping --chip-erase-locked-device

        Set target supply voltage on a kit:
        - pymcuprog.py setsupplyvoltage -l 3.3
            '''))

    parser.add_argument("action",
                        help="action to perform",
                        # This makes the action argument optional
                        # only if -V/--version or -R/release_info argument is given
                        nargs="?" if "-V" in sys.argv or "--version" in sys.argv \
                        or "-R" in sys.argv or "--release-info" in sys.argv else None,
                        default="ping",
                        # nargs='?', # this makes ping the default, and -h the only way to get usage()
                        choices=['ping', 'erase', 'read', 'write', 'verify', 'getvoltage', 'getsupplyvoltage',
                                 'reboot-debugger',
                                 'setsupplyvoltage', 'getusbvoltage', 'reset'])

    # Device to program
    parser.add_argument("-d", "--device",
                        type=str,
                        help="device to program")

    # Pack path
    parser.add_argument("-p", "--packpath",
                        type=str,
                        help="path to pack")

    # Tool to use
    parser.add_argument("-t", "--tool",
                        type=str,
                        help="tool to connect to")

    parser.add_argument("-s", "--serialnumber",
                        type=str,
                        help="USB serial number of the unit to use")

    # Memtype
    memtype_helpstring = "memory area to access: {}".format(MemoryNameAliases.ALL)
    for memtype in MemoryNames.get_all():
        memtype_helpstring += ", '{}'".format(memtype)
    parser.add_argument("-m", "--memory",
                        type=str,
                        default=MemoryNameAliases.ALL,
                        help=memtype_helpstring)

    parser.add_argument("-o", "--offset",
                        # Accept any integer base (0x.., 0b.., plain decimal)
                        type=lambda x: int(x, 0),
                        default="0",
                        help="memory byte offset to access")

    parser.add_argument("-b", "--bytes",
                        type=int,
                        default=0,
                        help="number of bytes to access")

    parser.add_argument("-l", "--literal",
                        type=_parse_literal,
                        nargs='+',
                        help="literal values to write")

    filename_helpstring_extra = "Note that when reading to hex file only "
    filename_helpstring_extra += ", ".join(WRITE_TO_HEX_MEMORIES)
    filename_helpstring_extra += " memories will be written to the hex file"
    parser.add_argument("-f", "--filename",
                        type=str,
                        help="file to write / read. "
                        "{}".format(filename_helpstring_extra))

    parser.add_argument("-c", "--clk",
                        type=str,
                        help="clock frequency in Hz (bps) for programming interface. "
                        "(eg: '-c 32768' or '-c 115k' or '-c 1M')")

    parser.add_argument("-u", "--uart",
                        type=str,
                        help="UART to use for UPDI")

    parser.add_argument("-i", "--interface",
                        type=str,
                        help="Interface to use")

    parser.add_argument("-v", "--verbose",
                        default="warning", choices=['debug', 'info', 'warning', 'error', 'critical'],
                        help="Logging verbosity level")

    parser.add_argument("-V", "--version",
                        help="Print pymcuprog version number and exit",
                        action="store_true")

    parser.add_argument("-R", "--release-info", action="store_true",
                        help="Print pymcuprog release details and exit")

    parser.add_argument("--verify",
                        help="verify after write from file",
                        action="store_true")

    parser.add_argument("-x", "--timing",
                        help="add timing output",
                        action="store_true")

    # Ex-options
    parser.add_argument("-hv", "--high-voltage",
                        choices=['tool-toggle-power', 'user-toggle-power', 'simple-unsafe-pulse'],
                        help="UPDI high-voltage activation mode")

    parser.add_argument("-ul", "--user-row-locked-device",
                        help="Writes the User Row on a locked device (UPDI devices only)",
                        action="store_true")

    parser.add_argument("-cl", "--chip-erase-locked-device",
                        help="Execute a Chip Erase on a locked device (UPDI devices only)",
                        action="store_true")

    # Parse args
    arguments = parser.parse_args()

    # Setup logging
    setup_logging(user_requested_level=getattr(logging, arguments.verbose.upper()))

    # Call main with args
    return pymcuprog_main.pymcuprog(arguments)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Propagate the CLI exit status to the shell
    sys.exit(main())
|
||||
79
software/tools/pymcuprog/libs/pymcuprog/pymcuprog_errors.py
Normal file
79
software/tools/pymcuprog/libs/pymcuprog/pymcuprog_errors.py
Normal file
@@ -0,0 +1,79 @@
|
||||
"""
|
||||
Pymcuprog specific exceptions
|
||||
"""
|
||||
|
||||
class PymcuprogError(Exception):
    """
    Base class for all Pymcuprog specific exceptions

    Carries an optional numeric error code alongside the message.
    """

    def __init__(self, msg=None, code=0):
        # Stash the numeric code, and let Exception handle the message
        self.code = code
        super(PymcuprogError, self).__init__(msg)
|
||||
|
||||
class PymcuprogToolConfigurationError(PymcuprogError):
    """
    Signals that a tool was incorrectly configured
    """

    def __init__(self, msg=None, code=0):
        # Forward both msg and code to the base class; the previous implementation
        # dropped code in the super() call and re-assigned it locally.
        super(PymcuprogToolConfigurationError, self).__init__(msg, code)
|
||||
|
||||
class PymcuprogToolConnectionError(PymcuprogError):
    """
    Signals that an attempted connect failed
    """

    def __init__(self, msg=None, code=0):
        # Forward both msg and code to the base class; the previous implementation
        # dropped code in the super() call and re-assigned it locally.
        super(PymcuprogToolConnectionError, self).__init__(msg, code)
|
||||
|
||||
class PymcuprogNotSupportedError(PymcuprogError):
    """
    Signals that an attempted operation is not supported
    """

    def __init__(self, msg=None, code=0):
        # Forward both msg and code to the base class; the previous implementation
        # dropped code in the super() call and re-assigned it locally.
        super(PymcuprogNotSupportedError, self).__init__(msg, code)
|
||||
|
||||
class PymcuprogSessionError(PymcuprogError):
    """
    Signals that a session is not active
    """

    def __init__(self, msg=None, code=0):
        # Forward both msg and code to the base class; the previous implementation
        # dropped code in the super() call and re-assigned it locally.
        super(PymcuprogSessionError, self).__init__(msg, code)
|
||||
|
||||
class PymcuprogSessionConfigError(PymcuprogError):
    """
    Signals that a session is not configured correctly
    """

    def __init__(self, msg=None, code=0):
        # Forward both msg and code to the base class; the previous implementation
        # dropped code in the super() call and re-assigned it locally.
        super(PymcuprogSessionConfigError, self).__init__(msg, code)
|
||||
|
||||
|
||||
class PymcuprogDeviceLockedError(PymcuprogError):
    """
    Signals that the device is locked and a chip erase is required to unlock it
    """

    def __init__(self, msg=None, code=0):
        # Forward both msg and code to the base class; the previous implementation
        # dropped code in the super() call and re-assigned it locally.
        super(PymcuprogDeviceLockedError, self).__init__(msg, code)
|
||||
|
||||
class PymcuprogEraseError(PymcuprogError):
    """
    Signals that an erase can't be executed

    Either the erase is not possible or the erase can't be executed without side effects,
    i.e. erasing more memories than requested
    """

    def __init__(self, msg=None, code=0):
        # Forward both msg and code to the base class; the previous implementation
        # dropped code in the super() call and re-assigned it locally.
        super(PymcuprogEraseError, self).__init__(msg, code)
|
||||
561
software/tools/pymcuprog/libs/pymcuprog/pymcuprog_main.py
Normal file
561
software/tools/pymcuprog/libs/pymcuprog/pymcuprog_main.py
Normal file
@@ -0,0 +1,561 @@
|
||||
"""
|
||||
Python MCU programmer, CLI main program
|
||||
"""
|
||||
# Python 3 compatibility for Python 2
|
||||
from __future__ import print_function
|
||||
|
||||
# utils
|
||||
import time
|
||||
import os
|
||||
from copy import copy
|
||||
|
||||
from .backend import Backend, SessionConfig
|
||||
from .toolconnection import ToolUsbHidConnection, ToolSerialConnection
|
||||
from .deviceinfo.memorynames import MemoryNameAliases, MemoryNames
|
||||
from .deviceinfo.eraseflags import ChiperaseEffect
|
||||
from .deviceinfo.deviceinfo import get_supported_devices
|
||||
from .deviceinfo.deviceinfokeys import DeviceMemoryInfoKeys
|
||||
|
||||
from .utils import print_tool_info, showdata, verify_flash_from_bin, compare
|
||||
from .hexfileutils import write_memories_to_hex, write_memory_to_hex, read_memories_from_hex, verify_flash_from_hex
|
||||
from .pymcuprog_errors import PymcuprogNotSupportedError, PymcuprogSessionConfigError, \
|
||||
PymcuprogToolConnectionError, PymcuprogDeviceLockedError
|
||||
|
||||
try:
|
||||
from .version import VERSION, BUILD_DATE, COMMIT_ID
|
||||
except ImportError:
|
||||
VERSION = "0.0.0"
|
||||
COMMIT_ID = "N/A"
|
||||
BUILD_DATE = "N/A"
|
||||
|
||||
STATUS_SUCCESS = 0
|
||||
STATUS_FAILURE = 1
|
||||
STATUS_FAILURE_LOCKED = 2
|
||||
|
||||
# Only include memories that can be written when writing memories to hex file
|
||||
WRITE_TO_HEX_MEMORIES = [MemoryNames.EEPROM, MemoryNames.FLASH, MemoryNames.FUSES, MemoryNames.CONFIG_WORD]
|
||||
|
||||
|
||||
def pymcuprog(args):
    """
    Main program

    Dispatches the requested CLI action: prints version info, connects to the
    tool, runs debugger-only (targetless) actions, selects the target device,
    starts a programming session and executes the programming action.

    :param args: parsed command line arguments (argparse namespace)
    :return: STATUS_SUCCESS, STATUS_FAILURE or STATUS_FAILURE_LOCKED
    """
    if args.version or args.release_info:
        print("pymcuprog version {}".format(VERSION))
        if args.release_info:
            print("Build date: {}".format(BUILD_DATE))
            print("Commit ID: {}".format(COMMIT_ID))
        return STATUS_SUCCESS

    backend = Backend()

    toolconnection = _setup_tool_connection(args)

    try:
        backend.connect_to_tool(toolconnection)
    except PymcuprogToolConnectionError as error:
        print(error)
        return STATUS_FAILURE

    status = None
    if args.tool not in ['uart']:
        # This section can initialise all features requiring non-UART transports

        # DAP info only available on native CMSIS-DAP
        dap_info = backend.read_tool_info()
        print_tool_info(dap_info)

        # Targetless actions, only available on HID tools
        status = _debugger_actions(backend, args)

    if status is not None:
        backend.disconnect_from_tool()
        return status

    device_selected = _select_target_device(backend, args)
    if device_selected is None:
        backend.disconnect_from_tool()
        return STATUS_FAILURE

    status = _start_session(backend, device_selected, args)
    if status != STATUS_SUCCESS:
        backend.disconnect_from_tool()
        return status

    # -x timer argument
    time_start = None
    if args.timing:
        print("Starting timer")
        time_start = time.time()

    # BUGFIX: capture the action status. It was previously discarded, so a
    # failing programming action (e.g. unknown command) was reported as the
    # stale STATUS_SUCCESS from _start_session.
    status = _programming_actions(backend, args)

    backend.end_session()
    backend.disconnect_from_tool()
    if args.timing:
        time_stop = time.time()
        print("Operation took {0:.03f}s".format(time_stop - time_start))

    print("Done.")

    return status
|
||||
|
||||
|
||||
def _action_getvoltage(backend):
    """Read the measured target voltage from the tool and display it."""
    measured = backend.read_target_voltage()
    print("Measured voltage: {0:0.2f}V".format(measured))
    return STATUS_SUCCESS
|
||||
|
||||
|
||||
def _action_getsupplyvoltage(backend):
    """Read the configured supply voltage setpoint from the tool and display it."""
    setpoint = backend.read_supply_voltage_setpoint()
    print("Supply voltage set to {0:0.2f}V".format(setpoint))
    return STATUS_SUCCESS
|
||||
|
||||
|
||||
def _action_getusbvoltage(backend):
    """Read the USB supply voltage seen by the tool and display it."""
    usb_voltage = backend.read_usb_voltage()
    print("USB voltage is {0:0.2f}V".format(usb_voltage))
    return STATUS_SUCCESS
|
||||
|
||||
|
||||
def _action_setsupplyvoltage(backend, literal):
    """
    Display the current supply voltage setpoint and, when a literal value is
    given, change the setpoint and read back the resulting target voltage.

    :param backend: pymcuprog Backend instance
    :param literal: list whose first element is the requested voltage in Volts,
        or None to only display usage help
    :return: STATUS_SUCCESS
    """
    voltage = backend.read_supply_voltage_setpoint()
    print("Supply voltage is currently set to {0:0.2f}V".format(voltage))
    if literal is None:
        print("Specify voltage in Volts using -l <literal>")
    else:
        setvoltage = literal[0]
        if setvoltage == voltage:
            print("Voltage is already right where you want it.")
        else:
            print("Setting supply voltage to {0:0.2f}V".format(setvoltage))
            backend.set_supply_voltage_setpoint(setvoltage)
            voltage = backend.read_supply_voltage_setpoint()
            print("Supply voltage is now set to {0:0.2f}V".format(voltage))

        # Static delay to let the target voltage settle before reading it out
        # Alternatively a retry loop could be used, but it is difficult to know when to terminate
        # the loop as sometimes the final voltage is not known, for example if setting the voltage
        # to 5.5V the actual voltage will depend upon the USB voltage. If the USB voltage is only
        # 4.9V the target voltage will never reach more than 4.9V
        time.sleep(0.5)
        voltage = backend.read_target_voltage()
        print("Measured voltage: {0:0.2f}V".format(voltage))
    return STATUS_SUCCESS
|
||||
|
||||
|
||||
def _action_reboot_debugger(backend):
    """Ask the connected debugger/tool to restart itself."""
    print("Rebooting tool...")
    backend.reboot_tool()
    return STATUS_SUCCESS
|
||||
|
||||
|
||||
def _action_ping(backend):
    """
    Check that the target is present by reading its device ID.

    The ID bytes are concatenated in reverse order (last byte first) to form
    the displayed hex string, matching the original prepend-loop behaviour.
    """
    print("Pinging device...")
    response = backend.read_device_id()
    idstring = ''.join('{:02X}'.format(idbyte) for idbyte in reversed(response))
    print("Ping response: {}".format(idstring))
    return STATUS_SUCCESS
|
||||
|
||||
|
||||
def _action_erase(backend, args):
    """
    Erase the whole device (chip/bulk erase) or a single memory area.

    :param backend: pymcuprog Backend instance
    :param args: CLI arguments; uses args.memory
    :return: STATUS_SUCCESS, or STATUS_FAILURE when the requested memory
        cannot be erased in isolation
    """
    if args.memory is None or args.memory == MemoryNameAliases.ALL:
        print("Chip/Bulk erase,")
        # Report which memory types the chip/bulk erase will affect
        for memname in MemoryNames.get_all():
            try:
                effect = backend.get_chiperase_effect(memname)
            except ValueError:
                # This memory type does not exist for this device, just continue
                continue
            else:
                if effect != ChiperaseEffect.NOT_ERASED:
                    print("Memory type {} is {}".format(memname, effect))
        print("...")
    else:
        if backend.is_isolated_erase_possible(args.memory):
            print("Erasing {}...".format(args.memory))
        else:
            print("ERROR: {} memory can't be erased or "
                  "can't be erased without affecting other memories".format(args.memory))
            # Tell the user whether a chip/bulk erase would cover this memory instead
            chiperase_effect = backend.get_chiperase_effect(args.memory)
            if chiperase_effect != ChiperaseEffect.NOT_ERASED:
                print("{} memory is {} by a chip/bulk erase".format(args.memory, chiperase_effect))
                print("Use erase without -m option to erase this memory")
            return STATUS_FAILURE

    backend.erase(args.memory, address=None)
    print("Erased.")
    return STATUS_SUCCESS
|
||||
|
||||
|
||||
def _action_read(backend, args):
    """
    Read device memory and display it, or store it to a hex or binary file.

    :param backend: pymcuprog Backend instance
    :param args: CLI arguments; uses args.memory, args.offset, args.bytes,
        args.filename and args.max_chunk_size
    :return: STATUS_SUCCESS, or STATUS_FAILURE when a byte count is given
        without a specific memory area
    """
    # Reading with bytes argument requires that memory type is specified
    if args.bytes != 0 and args.memory == MemoryNameAliases.ALL:
        print("Memory area must be specified when number of bytes is specified.")
        return STATUS_FAILURE

    print("Reading...")
    result = backend.read_memory(args.memory, args.offset, args.bytes, args.max_chunk_size)

    # If a filename is specified, write to it
    hexfile = False
    binary = False
    filepath = None
    if args.filename is not None:
        filepath = os.path.normpath(args.filename)
        prefix, postfix = _get_file_prefix_and_postfix(filepath)
        # If it ends in hex, use intel hex format, else binary
        if postfix == 'hex':
            hexfile = True
        else:
            binary = True

    # Print the data or save it to a file
    if hexfile:
        if args.memory == MemoryNameAliases.ALL:
            # Only memories that can be written should go into the hex file
            result_to_write = _extract_writeable_memories(result)
            write_memories_to_hex(filepath, result_to_write)
        else:
            write_memory_to_hex(filepath, result[0], args.offset)
        print("Data written to hex file: '{0:s}'".format(filepath))
    elif binary:
        for item in result:
            memory_name = item.memory_info[DeviceMemoryInfoKeys.NAME]
            data = item.data
            filepath = "{}_{}.{}".format(prefix, memory_name, postfix)
            # Binary files does not have addressing, and needs a split on memory type
            with open(filepath, "wb") as binfile:
                binfile.write(data)
            print("Data written to binary file: '{0:s}'".format(filepath))
    else:
        # No file given: hex-dump each memory segment to the console
        for item in result:
            memory_info = item.memory_info
            print("Memory type: {}".format(memory_info[DeviceMemoryInfoKeys.NAME]))
            showdata(item.data,
                     args.offset + memory_info[DeviceMemoryInfoKeys.ADDRESS],
                     memory_info[DeviceMemoryInfoKeys.PAGE_SIZE])
            print("\n")

    return STATUS_SUCCESS
|
||||
|
||||
|
||||
def _action_verify(backend, args):
    """
    Verify flash content against a hex file, a binary file or literal values.

    When both a file and literal values are given, the file wins and the
    literal verify is skipped.

    :param backend: pymcuprog Backend instance
    :param args: CLI arguments; uses args.filename, args.literal, args.offset
        and args.max_read_chunk
    :return: STATUS_SUCCESS (mismatches raise from the verify helpers)
    :raises Exception: if neither a file nor literal values were specified
    """
    hexfile = False
    binary = False
    literal = False
    filepath = None
    if args.filename is not None:
        filepath = os.path.normpath(args.filename)
        _, postfix = _get_file_prefix_and_postfix(filepath)
        # If it ends in hex, use intel hex format, else binary
        if postfix == 'hex':
            hexfile = True
        else:
            binary = True
    if args.literal is not None:
        literal = True
        if args.filename is not None:
            print("Both file and literal value was specified. Literal verify will be ignored in favor of file verify")
            literal = False

    if hexfile:
        print("Verifying...")
        verify_status = verify_flash_from_hex(args.filename, backend, max_read_chunk=args.max_read_chunk)
        if verify_status is True:
            print("Verify successful. Data in flash matches data in specified hex-file")
    elif binary:
        print("Verifying...")
        verify_status = verify_flash_from_bin(args.filename, backend, args.offset, max_read_chunk=args.max_read_chunk)
        if verify_status is True:
            print("Verify successful. Data in flash matches data in specified bin-file")
    elif literal:
        print("Verifying...")
        # compare() is expected to signal mismatches; success is reported after it returns
        flash_data = backend.read_memory('flash', args.offset, len(args.literal), max_read_chunk=args.max_read_chunk)[0].data
        compare(flash_data, args.literal, args.offset)
        print("Verify successful. Data in flash matches literal data specified")
    else:
        raise Exception('No file or literal specified for verify')

    return STATUS_SUCCESS
|
||||
|
||||
|
||||
def _get_file_prefix_and_postfix(filepath):
|
||||
"""
|
||||
Get file prefix and postfix from the filepath
|
||||
|
||||
If the file name in the filepath has not file extension the file is supposed to be a binary file
|
||||
:param filepath: File name and full path
|
||||
:return: prefix, postfix
|
||||
"""
|
||||
prefix = filepath.split('.')[0]
|
||||
postfix = filepath.split('.')[-1].lower()
|
||||
# If no "." is found in the filepath
|
||||
if postfix == prefix:
|
||||
postfix = "bin"
|
||||
|
||||
return prefix, postfix
|
||||
|
||||
|
||||
def _extract_writeable_memories(memory_segments):
    """
    Filter a list of memory segments down to those that can be written.

    :param memory_segments: List of namedtuples with two fields: data and memory_info.
        data contains a byte array of raw data bytes and memory_info is a dictionary
        with memory information (as defined in deviceinfo.deviceinfo.DeviceMemoryInfo).
    :return: subset of memory_segments whose memory type is in WRITE_TO_HEX_MEMORIES
    """
    return [segment for segment in memory_segments
            if segment.memory_info[DeviceMemoryInfoKeys.NAME] in WRITE_TO_HEX_MEMORIES]
|
||||
|
||||
|
||||
def _action_write(backend, args):
    """
    Write data to device memory from a hex file, a binary file or literal values.

    :param backend: pymcuprog Backend instance
    :param args: CLI arguments; uses args.filename, args.literal, args.memory,
        args.offset, args.verify, args.blocksize and args.pagewrite_delay
    :return: STATUS_SUCCESS, or STATUS_FAILURE for invalid hex-file argument
        combinations
    """
    # If a filename is specified, read from it
    if args.filename is not None:
        filepath = os.path.normpath(args.filename)
        _, postfix = _get_file_prefix_and_postfix(filepath)
        # If it ends in hex, use intel hex format, else binary
        if postfix == 'hex':
            # Hexfiles contain addressing information that cannot be remapped, so offset/memory are not allowed here
            if args.offset:
                print("Offset cannot be specified when writing hex file")
                return STATUS_FAILURE

            if args.memory != MemoryNameAliases.ALL:
                print("Memory area cannot be specified when writing hex file")
                return STATUS_FAILURE

            result = read_memories_from_hex(args.filename, backend.device_memory_info)

            print("Writing from hex file...")

            _write_memory_segments(backend, result, args.verify, blocksize=args.blocksize, pagewrite_delay=args.pagewrite_delay)
        else:
            with open(filepath, "rb") as binfile:
                data_from_file = bytearray(binfile.read())

            # Prepare and write data
            print("Writing from binary file...")
            # When writing data to target the data might be pagealigned so we make a copy to avoid verifying
            # more than needed (in case verify option is enabled)
            data_to_write = copy(data_from_file)
            backend.write_memory(data_to_write, args.memory, args.offset)
            if args.verify:
                print("Verifying from binary file...")
                # Verify content, an exception is thrown on mismatch
                backend.verify_memory(data_from_file, args.memory, args.offset)
    elif args.literal:
        # Prepare and write data
        print("Writing literal values...")
        backend.write_memory(bytearray(args.literal), args.memory, args.offset)
        if args.verify:
            print("Verifying literal values...")
            # Verify content, an exception is thrown on mismatch
            backend.verify_memory(bytearray(args.literal), args.memory, args.offset)
    else:
        print("Error: for writing use either -f <file> or -l <literal>")

    return STATUS_SUCCESS
|
||||
|
||||
|
||||
def _write_memory_segments(backend, memory_segments, verify, blocksize = 0, pagewrite_delay=0):
    """
    Write content of list of memory segments

    :param backend: pymcuprog Backend instance
    :param memory_segments: List of namedtuples with two fields: data and memory_info. data contains a byte array of
        raw data bytes and memory_info is a dictionary with memory information (as defined in
        deviceinfo.deviceinfo.DeviceMemoryInfo).
    :param verify: If True verify the written data by reading it back and compare
    :param blocksize: this is a signal to write_memory for updiserial when writing flash; if 0 or not supplied
        do not use blocks (equivalent to blocksize == 2 bytes or 1 word). If -1, it will set the blocksize to
        the page size of the target chip, which can increase write speed more than 10:1. Any other number will
        be used as supplied. Even numbers up to the page size are recommended.
        Any other negative number is invalid, and is zero'ed out.
    :param pagewrite_delay: delay between page writes, passed through to backend.write_memory
    """
    for segment in memory_segments:
        memory_name = segment.memory_info[DeviceMemoryInfoKeys.NAME]
        print("Writing {}...".format(memory_name))
        backend.write_memory(segment.data, memory_name, segment.offset, blocksize=blocksize, pagewrite_delay=pagewrite_delay)
        if verify:
            print("Verifying {}...".format(memory_name))
            # verify_memory returns truthy on success
            verify_ok = backend.verify_memory(segment.data, memory_name, segment.offset)
            if verify_ok:
                print("OK")
            else:
                print("Verification failed!")
|
||||
|
||||
|
||||
def _action_reset(backend):
    """Pulse the target reset line: hold reset briefly, then release it."""
    backend.hold_in_reset()
    # Give the device time to actually enter reset before releasing.
    # If needed this sleep could be made configurable by a CLI parameter,
    # but for now a hardcoded value is assumed to be sufficient.
    time.sleep(0.1)
    backend.release_from_reset()
    return STATUS_SUCCESS
|
||||
|
||||
|
||||
def _debugger_actions(backend, args):
    """
    Debugger related actions

    Targetless actions only involving the debugger. Only available on HID tools.

    :param backend: pymcuprog Backend instance
    :param args: CLI arguments; uses args.action and args.literal
    :return: status of the executed action, or None when args.action is not
        a debugger-only action
    """
    # Dispatch table replaces the original sequential if-chain; exactly one
    # entry can match a given args.action, so behaviour is unchanged.
    handlers = {
        'getvoltage': lambda: _action_getvoltage(backend),
        'getsupplyvoltage': lambda: _action_getsupplyvoltage(backend),
        'getusbvoltage': lambda: _action_getusbvoltage(backend),
        'setsupplyvoltage': lambda: _action_setsupplyvoltage(backend, args.literal),
        'reboot-debugger': lambda: _action_reboot_debugger(backend),
    }
    handler = handlers.get(args.action)
    return handler() if handler is not None else None
|
||||
|
||||
|
||||
def _programming_actions(backend, args):
    """
    Dispatch programming actions that require an active target session.

    Ping checks that the device is there by reading its ID (or equivalent);
    erase performs a full chip erase, or memtype-only erase if specified.

    :param backend: pymcuprog Backend instance
    :param args: CLI arguments; uses args.action
    :return: status of the executed action, STATUS_FAILURE for unknown actions
    """
    dispatch = {
        "ping": lambda: _action_ping(backend),
        "erase": lambda: _action_erase(backend, args),
        "read": lambda: _action_read(backend, args),
        "write": lambda: _action_write(backend, args),
        "reset": lambda: _action_reset(backend),
        "verify": lambda: _action_verify(backend, args),
    }
    action = dispatch.get(args.action)
    if action is None:
        print("Unknown command '{0:s}'".format(args.action))
        return STATUS_FAILURE
    return action()
|
||||
|
||||
|
||||
def _setup_tool_connection(args):
    """
    Build the tool-connection object matching the CLI arguments.

    :param args: CLI arguments; uses args.tool, args.uart and args.serialnumber
    :return: ToolSerialConnection for 'uart', otherwise ToolUsbHidConnection
    """
    # Embedded GPIO/UART tool (eg: raspberry pi) => no USB connection
    if args.tool == "uart":
        return ToolSerialConnection(serialport=args.uart)

    usb_serial = args.serialnumber
    product = args.tool
    if usb_serial and product:
        print("Connecting to {0:s} ({1:s})'".format(product, usb_serial))
    elif usb_serial:
        print("Connecting to any tool with USB serial number '{0:s}'".format(usb_serial))
    elif product:
        print("Connecting to any {0:s}".format(product))
    else:
        print("Connecting to anything possible")
    return ToolUsbHidConnection(serialnumber=usb_serial, tool_name=product)
|
||||
|
||||
|
||||
def _select_target_device(backend, args):
|
||||
device_mounted = None
|
||||
device_selected = None
|
||||
if args.tool not in ['uart']:
|
||||
# Find out from the board (kit) if a device is mounted
|
||||
device_mounted = backend.read_kit_device()
|
||||
if device_mounted is not None:
|
||||
device_mounted = device_mounted.lower()
|
||||
print("Device mounted: '{0:s}'".format(device_mounted))
|
||||
|
||||
# Parse device field. If unspecified, use the board's device
|
||||
if args.device:
|
||||
device_selected = args.device.lower()
|
||||
else:
|
||||
if device_mounted is None:
|
||||
print("Unable to determine on-board target! Please specify device using -d <device>")
|
||||
else:
|
||||
print("No device specified. Using on-board target ({0:s})".format(device_mounted))
|
||||
device_selected = device_mounted
|
||||
|
||||
# Mismatch. Allow user to proceed at own risk.
|
||||
if device_mounted is not None and device_selected != device_mounted:
|
||||
print("Warning: you are attempting to use a device which is not the one which was mounted on the kit!")
|
||||
print("Cut all straps between the debugger and the on-board target when accessing an external device!")
|
||||
|
||||
return device_selected
|
||||
|
||||
|
||||
def _start_session(backend, device, args):
    """
    Setup the session and try to build the stack for this device

    :param backend: pymcuprog Backend instance
    :param device: lower-case device name to program
    :param args: CLI arguments; uses clock, interface, packpath and
        lock-handling flags
    :return: STATUS_SUCCESS, STATUS_FAILURE or STATUS_FAILURE_LOCKED
    """
    sessionconfig = SessionConfig(device)

    # -c clock argument
    # allow Hz, or kHz ending in 'k' (eg: 100k) or MHz ending in 'M' eg (1M)
    if args.clk:
        if args.clk[-1] == 'k':
            clk = int(args.clk.strip('k')) * 1000
        elif args.clk[-1] == 'M':
            clk = int(args.clk.strip('M')) * 1000000
        else:
            clk = int(args.clk)

        sessionconfig.interface_speed = clk

    # Translate args into "special_options" to pass down the stack
    sessionconfig.special_options = {}
    if args.high_voltage:
        sessionconfig.special_options['high-voltage'] = args.high_voltage
    if args.user_row_locked_device:
        sessionconfig.special_options['user-row-locked-device'] = args.user_row_locked_device
    if args.chip_erase_locked_device:
        sessionconfig.special_options['chip-erase-locked-device'] = args.chip_erase_locked_device

    # Programming user row on locked parts and erasing to unlock are mutually exclusive
    if args.chip_erase_locked_device and args.user_row_locked_device:
        print("User row cannot be written on a locked device while erasing and unlocking.")
        return STATUS_FAILURE

    if args.interface:
        sessionconfig.interface = args.interface

    if args.packpath:
        sessionconfig.packpath = args.packpath

    status = STATUS_SUCCESS
    try:
        backend.start_session(sessionconfig)
    except PymcuprogDeviceLockedError:
        print("The device is in a locked state and is not accessible; a chip erase is required.")
        print("Locked AVR UPDI devices can:")
        print(" - be unlocked using command: erase --chip-erase-locked-device")
        print(" - write user row values using command: write --user-row-locked-device")
        status = STATUS_FAILURE_LOCKED
    except PymcuprogNotSupportedError:
        print("Unable to setup stack for device {0:s}".format(sessionconfig.device))
        print("Currently supported devices (in 'devices' folder):")
        device_list = get_supported_devices()
        print(', '.join(map(str, device_list)))
        status = STATUS_FAILURE
    except PymcuprogSessionConfigError as error:
        print("Unable to start session: {}".format(error))
        status = STATUS_FAILURE

    return status
|
||||
@@ -0,0 +1,268 @@
|
||||
"""
|
||||
Application layer for UPDI stack
|
||||
"""
|
||||
from logging import getLogger
|
||||
from pymcuprog.pymcuprog_errors import PymcuprogError
|
||||
from . import constants
|
||||
from .link import UpdiDatalink16bit, UpdiDatalink24bit
|
||||
from .nvm import NvmUpdi, NvmUpdiTinyMega, NvmUpdiAvrDx
|
||||
from .readwrite import UpdiReadWrite
|
||||
from .physical import UpdiPhysical
|
||||
from .timeout import Timeout
|
||||
|
||||
|
||||
def decode_sib(sib):
    """
    Turns the raw System Information Block into something readable.

    :param sib: SIB bytes to decode
    :return: dict with 'family', 'NVM', 'OCD', 'OSC' and 'extra' keys,
        or None if the SIB cannot be decoded
    """
    logger = getLogger(__name__)

    # Strip NUL padding before decoding
    stripped = sib.replace(b"\x00", b"")

    try:
        sib_string = stripped.decode('ascii')
    except UnicodeDecodeError:
        return None

    # The fixed-width fields parsed below require at least 19 characters
    if len(sib_string) < 19:
        return None

    logger.info("SIB: '%s'", sib_string)

    # Parse fixed width fields according to spec
    sib_info = {}

    family = stripped[0:7].strip().decode()
    logger.info("Device family ID: '%s'", family)
    sib_info['family'] = family

    nvm = stripped[8:11].strip().decode()
    logger.info("NVM interface: '%s'", nvm)
    sib_info['NVM'] = nvm.split(':')[1]

    ocd = stripped[11:14].strip().decode()
    logger.info("Debug interface: '%s'", ocd)
    sib_info['OCD'] = ocd.split(':')[1]

    osc = stripped[15:19].strip().decode()
    logger.info("PDI oscillator: '%s'", osc)
    sib_info['OSC'] = osc

    extra = stripped[19:].strip().decode()
    logger.info("Extra info: '%s'", extra)
    sib_info['extra'] = extra

    return sib_info
|
||||
|
||||
|
||||
class UpdiApplication:
    """
    Generic application layer for UPDI
    """

    def __init__(self, serialport, baud, device=None):
        """
        :param serialport: serial port to connect through
        :param baud: requested baud rate
        :param device: device information object (optional)
        """
        self.logger = getLogger(__name__)
        self.device = device
        # Build the UPDI stack:
        # Create a physical layer. The initial connection is capped at
        # 115200 baud; the requested rate is applied after the datalink is up.
        baud_temp = min(baud, 115200)
        self.phy = UpdiPhysical(serialport, baud_temp)

        # Create a DL - use 16-bit until otherwise known
        datalink = UpdiDatalink16bit()

        # Set the physical for use in the datalink
        datalink.set_physical(self.phy)

        # Init (activate) the datalink
        datalink.init_datalink()

        # Set the actual baud
        datalink.change_baud(baud)

        # Create a read write access layer using this data link
        self.readwrite = UpdiReadWrite(datalink)

        # Create an NVM driver
        self.nvm = NvmUpdi(self.readwrite, self.device)

    def read_device_info(self):
        """
        Reads out device information from various sources

        Decodes the SIB to select the correct datalink width (16/24-bit) and
        NVM driver variant, retrying once after a hard reset (double break)
        if the SIB cannot be read.

        :raises RuntimeError: if the SIB cannot be read even after a hard reset
        """
        sib = self.readwrite.read_sib()
        sib_info = decode_sib(sib)

        if sib_info is None:
            self.logger.warning("Cannot read SIB, hard reset...")
            self.phy.send_double_break()
            sib = self.readwrite.read_sib()
            sib_info = decode_sib(sib)
            if sib_info is None:
                self.logger.error("Hard reset failed.")
                raise RuntimeError("Failed to read device info.")

        if sib_info['NVM'] == '2':
            # This is a Dx-family member, and needs new DL and NVM
            self.logger.info("Using 24-bit UPDI")
            # Create new DL
            datalink = UpdiDatalink24bit()
            # Use the existing PHY
            datalink.set_physical(self.phy)
            # And re-init
            datalink.init_datalink()
            # Create a read write access layer using this data link
            self.readwrite = UpdiReadWrite(datalink)
            # Create new NVM driver
            self.nvm = NvmUpdiAvrDx(self.readwrite, self.device)
        else:
            self.logger.info("Using 16-bit UPDI")
            # DL is correctly configured already
            # Create new NVM driver
            self.nvm = NvmUpdiTinyMega(self.readwrite, self.device)

        self.logger.info("PDI revision = 0x%02X", self.readwrite.read_cs(constants.UPDI_CS_STATUSA) >> 4)
        if self.in_prog_mode():
            if self.device is not None:
                devid = self.read_data(self.device.sigrow_address, 3)
                devrev = self.read_data(self.device.syscfg_address + 1, 1)
                self.logger.info("Device ID from pyupdi = '%02X%02X%02X' rev '%s'", devid[0], devid[1], devid[2],
                                 chr(ord('A') + devrev[0]))

    def read_data(self, address, size):
        """
        Reads a number of bytes of data from UPDI

        :param address: address to read from
        :param size: number of bytes to read
        """
        return self.readwrite.read_data(address, size)

    def read_data_words(self, address, words):
        """
        Reads a number of words of data from UPDI

        :param address: address to read from
        :param words: number of words to read
        """
        return self.readwrite.read_data_words(address, words)

    def write_data_words(self, address, data):
        """
        Writes a number of words to memory

        :param address: address to write to
        :param data: data to write
        """
        return self.readwrite.write_data_words(address, data)

    def write_data(self, address, data):
        """
        Writes a number of bytes to memory

        :param address: address to write to
        :param data: data to write
        """
        # BUGFIX: delegate to the read/write layer. The original called
        # self.write_data recursively, causing infinite recursion
        # (RecursionError) on any byte write.
        return self.readwrite.write_data(address, data)

    def in_prog_mode(self):
        """
        Checks whether the NVM PROG flag is up
        """
        if self.readwrite.read_cs(constants.UPDI_ASI_SYS_STATUS) & (1 << constants.UPDI_ASI_SYS_STATUS_NVMPROG):
            return True
        return False

    def wait_unlocked(self, timeout_ms):
        """
        Waits for the device to be unlocked.
        All devices boot up as locked until proven otherwise

        :param timeout_ms: number of milliseconds to wait
        """
        timeout = Timeout(timeout_ms)

        while not timeout.expired():
            if not self.readwrite.read_cs(constants.UPDI_ASI_SYS_STATUS) & (
                    1 << constants.UPDI_ASI_SYS_STATUS_LOCKSTATUS):
                return True

        self.logger.error("Timeout waiting for device to unlock")
        return False

    def unlock(self):
        """
        Unlock by chip erase

        :raises PymcuprogError: if the key is rejected or the erase fails
        """
        # Put in the key
        self.readwrite.write_key(constants.UPDI_KEY_64, constants.UPDI_KEY_CHIPERASE)

        # Check key status
        key_status = self.readwrite.read_cs(constants.UPDI_ASI_KEY_STATUS)
        self.logger.debug("Key status = 0x%02X", key_status)

        if not key_status & (1 << constants.UPDI_ASI_KEY_STATUS_CHIPERASE):
            raise PymcuprogError("Key not accepted")

        # Toggle reset
        self.reset(apply_reset=True)
        self.reset(apply_reset=False)

        # And wait for unlock
        if not self.wait_unlocked(100):
            raise PymcuprogError("Failed to chip erase using key")

    def enter_progmode(self):
        """
        Enters into NVM programming mode

        :raises IOError: if the key is rejected or programming mode is not reached
        """
        # First check if NVM is already enabled
        if self.in_prog_mode():
            self.logger.info("Already in NVM programming mode")
            return True

        self.logger.info("Entering NVM programming mode")

        # Put in the key
        self.readwrite.write_key(constants.UPDI_KEY_64, constants.UPDI_KEY_NVM)

        # Check key status
        key_status = self.readwrite.read_cs(constants.UPDI_ASI_KEY_STATUS)
        self.logger.debug("Key status = 0x%02X", key_status)

        if not key_status & (1 << constants.UPDI_ASI_KEY_STATUS_NVMPROG):
            self.logger.error("Key status = 0x%02X", key_status)
            raise IOError("Key not accepted")

        # Toggle reset
        self.reset(apply_reset=True)
        self.reset(apply_reset=False)

        # And wait for unlock
        if not self.wait_unlocked(100):
            raise IOError("Failed to enter NVM programming mode: device is locked")

        # Check for NVMPROG flag
        if not self.in_prog_mode():
            raise IOError("Failed to enter NVM programming mode")

        self.logger.debug("Now in NVM programming mode")
        return True

    def leave_progmode(self):
        """
        Disables UPDI which releases any keys enabled
        """
        self.logger.info("Leaving NVM programming mode")
        self.reset(apply_reset=True)
        self.reset(apply_reset=False)
        self.readwrite.write_cs(constants.UPDI_CS_CTRLB,
                                (1 << constants.UPDI_CTRLB_UPDIDIS_BIT) | (1 << constants.UPDI_CTRLB_CCDETDIS_BIT))

    def reset(self, apply_reset):
        """
        Applies or releases an UPDI reset condition

        :param apply_reset: True to apply, False to release
        """
        if apply_reset:
            self.logger.info("Apply reset")
            self.readwrite.write_cs(constants.UPDI_ASI_RESET_REQ, constants.UPDI_RESET_REQ_VALUE)
        else:
            self.logger.info("Release reset")
            self.readwrite.write_cs(constants.UPDI_ASI_RESET_REQ, 0x00)
|
||||
110
software/tools/pymcuprog/libs/pymcuprog/serialupdi/constants.py
Normal file
110
software/tools/pymcuprog/libs/pymcuprog/serialupdi/constants.py
Normal file
@@ -0,0 +1,110 @@
|
||||
"""
|
||||
UPDI protocol constants
|
||||
"""
|
||||
# UPDI commands and control definitions
|
||||
UPDI_BREAK = 0x00
|
||||
|
||||
UPDI_LDS = 0x00
|
||||
UPDI_STS = 0x40
|
||||
UPDI_LD = 0x20
|
||||
UPDI_ST = 0x60
|
||||
UPDI_LDCS = 0x80
|
||||
UPDI_STCS = 0xC0
|
||||
UPDI_REPEAT = 0xA0
|
||||
UPDI_KEY = 0xE0
|
||||
|
||||
UPDI_PTR = 0x00
|
||||
UPDI_PTR_INC = 0x04
|
||||
UPDI_PTR_ADDRESS = 0x08
|
||||
|
||||
UPDI_ADDRESS_8 = 0x00
|
||||
UPDI_ADDRESS_16 = 0x04
|
||||
UPDI_ADDRESS_24 = 0x08
|
||||
|
||||
UPDI_DATA_8 = 0x00
|
||||
UPDI_DATA_16 = 0x01
|
||||
UPDI_DATA_24 = 0x02
|
||||
|
||||
UPDI_KEY_SIB = 0x04
|
||||
UPDI_KEY_KEY = 0x00
|
||||
|
||||
UPDI_KEY_64 = 0x00
|
||||
UPDI_KEY_128 = 0x01
|
||||
UPDI_KEY_256 = 0x02
|
||||
|
||||
UPDI_SIB_8BYTES = UPDI_KEY_64
|
||||
UPDI_SIB_16BYTES = UPDI_KEY_128
|
||||
UPDI_SIB_32BYTES = UPDI_KEY_256
|
||||
|
||||
UPDI_REPEAT_BYTE = 0x00
|
||||
UPDI_REPEAT_WORD = 0x01
|
||||
|
||||
UPDI_PHY_SYNC = 0x55
|
||||
UPDI_PHY_ACK = 0x40
|
||||
|
||||
UPDI_MAX_REPEAT_SIZE = (0xFF+1) # Repeat counter of 1-byte, with off-by-one counting
|
||||
|
||||
# CS and ASI Register Address map
|
||||
UPDI_CS_STATUSA = 0x00
|
||||
UPDI_CS_STATUSB = 0x01
|
||||
UPDI_CS_CTRLA = 0x02
|
||||
UPDI_CS_CTRLB = 0x03
|
||||
UPDI_ASI_KEY_STATUS = 0x07
|
||||
UPDI_ASI_RESET_REQ = 0x08
|
||||
UPDI_ASI_CTRLA = 0x09
|
||||
UPDI_ASI_SYS_CTRLA = 0x0A
|
||||
UPDI_ASI_SYS_STATUS = 0x0B
|
||||
UPDI_ASI_CRC_STATUS = 0x0C
|
||||
|
||||
UPDI_CTRLA_IBDLY_BIT = 7
|
||||
UPDI_CTRLB_CCDETDIS_BIT = 3
|
||||
UPDI_CTRLB_UPDIDIS_BIT = 2
|
||||
|
||||
UPDI_KEY_NVM = b"NVMProg "
|
||||
UPDI_KEY_CHIPERASE = b"NVMErase"
|
||||
|
||||
UPDI_ASI_STATUSA_REVID = 4
|
||||
UPDI_ASI_STATUSB_PESIG = 0
|
||||
|
||||
UPDI_ASI_KEY_STATUS_CHIPERASE = 3
|
||||
UPDI_ASI_KEY_STATUS_NVMPROG = 4
|
||||
UPDI_ASI_KEY_STATUS_UROWWRITE = 5
|
||||
|
||||
UPDI_ASI_SYS_STATUS_RSTSYS = 5
|
||||
UPDI_ASI_SYS_STATUS_INSLEEP = 4
|
||||
UPDI_ASI_SYS_STATUS_NVMPROG = 3
|
||||
UPDI_ASI_SYS_STATUS_UROWPROG = 2
|
||||
UPDI_ASI_SYS_STATUS_LOCKSTATUS = 0
|
||||
|
||||
UPDI_RESET_REQ_VALUE = 0x59
|
||||
|
||||
# FLASH CONTROLLER
|
||||
UPDI_NVMCTRL_CTRLA = 0x00
|
||||
UPDI_NVMCTRL_CTRLB = 0x01
|
||||
UPDI_NVMCTRL_STATUS = 0x02
|
||||
UPDI_NVMCTRL_INTCTRL = 0x03
|
||||
UPDI_NVMCTRL_INTFLAGS = 0x04
|
||||
UPDI_NVMCTRL_DATAL = 0x06
|
||||
UPDI_NVMCTRL_DATAH = 0x07
|
||||
UPDI_NVMCTRL_ADDRL = 0x08
|
||||
UPDI_NVMCTRL_ADDRH = 0x09
|
||||
|
||||
# NVMCTRL v0 CTRLA
|
||||
UPDI_V0_NVMCTRL_CTRLA_NOP = 0x00
|
||||
UPDI_V0_NVMCTRL_CTRLA_WRITE_PAGE = 0x01
|
||||
UPDI_V0_NVMCTRL_CTRLA_ERASE_PAGE = 0x02
|
||||
UPDI_V0_NVMCTRL_CTRLA_ERASE_WRITE_PAGE = 0x03
|
||||
UPDI_V0_NVMCTRL_CTRLA_PAGE_BUFFER_CLR = 0x04
|
||||
UPDI_V0_NVMCTRL_CTRLA_CHIP_ERASE = 0x05
|
||||
UPDI_V0_NVMCTRL_CTRLA_ERASE_EEPROM = 0x06
|
||||
UPDI_V0_NVMCTRL_CTRLA_WRITE_FUSE = 0x07
|
||||
|
||||
# NVMCTRL v1 CTRLA
|
||||
UPDI_V1_NVMCTRL_CTRLA_NOCMD = 0x00
|
||||
UPDI_V1_NVMCTRL_CTRLA_FLASH_WRITE = 0x02
|
||||
UPDI_V1_NVMCTRL_CTRLA_EEPROM_ERASE_WRITE = 0x13
|
||||
UPDI_V1_NVMCTRL_CTRLA_CHIP_ERASE = 0x20
|
||||
|
||||
UPDI_NVM_STATUS_WRITE_ERROR = 2
|
||||
UPDI_NVM_STATUS_EEPROM_BUSY = 1
|
||||
UPDI_NVM_STATUS_FLASH_BUSY = 0
|
||||
# ==== end of constants.py ====
# ==== new file: software/tools/pymcuprog/libs/pymcuprog/serialupdi/link.py (431 lines) ====
|
||||
"""
|
||||
Link layer in UPDI protocol stack
|
||||
"""
|
||||
from logging import getLogger
|
||||
|
||||
from pymcuprog.pymcuprog_errors import PymcuprogError
|
||||
from . import constants
|
||||
|
||||
class UpdiDatalink:
    """
    UPDI data link class handles the UPDI data protocol within the device.

    Frames are built from the constants module and pushed through an injected
    physical layer (see set_physical) which provides send()/receive()/sib().
    """

    # Every LDCS instruction returns exactly one byte
    LDCS_RESPONSE_BYTES = 1

    def __init__(self):
        self.logger = getLogger(__name__)
        self.updi_phy = None

    def set_physical(self, physical):
        """
        Inject a serial-port based physical layer for use by this DL

        :param physical: physical-layer driver providing send()/receive()/sib()
        """
        self.updi_phy = physical

    def _init_session_parameters(self):
        """
        Set the inter-byte delay bit and disable collision detection
        """
        self.stcs(constants.UPDI_CS_CTRLB, 1 << constants.UPDI_CTRLB_CCDETDIS_BIT)
        # 0x06 written to CTRLA - NOTE(review): presumably sets the guard-time /
        # inter-byte delay field; confirm against the device's UPDI chapter
        self.stcs(constants.UPDI_CS_CTRLA, 0x06)

    def init_datalink(self):
        """
        Init DL layer

        :raises PymcuprogError: if UPDI does not respond even after a double break
        """
        self._init_session_parameters()
        # Check
        if not self._check_datalink():
            # Send double break if all is not well, and re-check
            self.updi_phy.send_double_break()
            self._init_session_parameters()
            if not self._check_datalink():
                raise PymcuprogError("UPDI initialisation failed")

    def change_baud(self, baud):
        """
        Change the link baud rate.

        Writes ASI_CTRLA on the device first, then switches the host port.

        :param baud: new baud rate in bits per second
        """
        if self.updi_phy is not None:
            self.stcs(constants.UPDI_CS_CTRLA, 0x06)
            # ASI_CTRLA value selects the UPDI clock; the thresholds below map
            # host baud rate to a clock setting (0x01 fastest, 0x03 slowest) -
            # NOTE(review): confirm the mapping against the UPDI datasheet
            if baud <= 115200:
                self.stcs(constants.UPDI_ASI_CTRLA, 0x03)
            elif baud > 230400:
                self.stcs(constants.UPDI_ASI_CTRLA, 0x01)
            else:
                self.stcs(constants.UPDI_ASI_CTRLA, 0x02)
            self.updi_phy.change_baud(baud)

    def _check_datalink(self):
        """
        Check UPDI by loading CS STATUSA

        :return: True if the link responds with a non-zero STATUSA, else False
        """
        try:
            if self.ldcs(constants.UPDI_CS_STATUSA) != 0:
                self.logger.info("UPDI init OK")
                return True
        except PymcuprogError:
            self.logger.warning("Check failed")
            return False
        self.logger.info("UPDI not OK - reinitialisation required")
        return False

    def ldcs(self, address):
        """
        Load data from Control/Status space

        :param address: address to load
        :return: the byte read
        :raises PymcuprogError: if the device does not return exactly one byte
        """
        self.logger.debug("LDCS from 0x%02X", address)
        self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LDCS | (address & 0x0F)])
        response = self.updi_phy.receive(self.LDCS_RESPONSE_BYTES)
        numbytes_received = len(response)
        if numbytes_received != self.LDCS_RESPONSE_BYTES:
            raise PymcuprogError("Unexpected number of bytes in response: "
                                 "{} byte(s) expected {} byte(s)".format(numbytes_received, self.LDCS_RESPONSE_BYTES))

        return response[0]

    def stcs(self, address, value):
        """
        Store a value to Control/Status space

        :param address: address to store to
        :param value: value to write
        """
        self.logger.debug("STCS to 0x%02X", address)
        self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_STCS | (address & 0x0F), value])

    def ld_ptr_inc(self, size):
        """
        Loads a number of bytes from the pointer location with pointer post-increment

        :param size: number of bytes to load
        :return: values read
        """
        self.logger.debug("LD8 from ptr++")
        self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC |
                            constants.UPDI_DATA_8])
        return self.updi_phy.receive(size)

    def ld_ptr_inc16(self, words):
        """
        Load a 16-bit word value from the pointer location with pointer post-increment.
        For improved performance of serialupdi for Arduino, send the REP instruction in the same command as LD

        :param words: number of words to load
        :return: values read (2 bytes per word)
        """
        self.logger.debug("LD16 from ptr++")
        # combine REP, words with ld *ptr++
        self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE,
                            (words - 1) & 0xFF, constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC |
                            constants.UPDI_DATA_16])
        return self.updi_phy.receive(words << 1)

    def st_ptr_inc(self, data):
        """
        Store data to the pointer location with pointer post-increment

        :param data: data to store
        :raises PymcuprogError: if any byte is not ACKed by the device
        """
        self.logger.debug("ST8 to *ptr++")
        self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_8,
                            data[0]])
        response = self.updi_phy.receive(1)

        if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
            raise PymcuprogError("ACK error with st_ptr_inc")

        # Each subsequent byte is ACKed individually
        num = 1
        while num < len(data):
            self.updi_phy.send([data[num]])
            response = self.updi_phy.receive(1)

            if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
                raise PymcuprogError("Error with st_ptr_inc")
            num += 1

    def st_ptr_inc16(self, data):
        """
        Store a 16-bit word value to the pointer location with pointer post-increment

        :param data: data to store (even number of bytes, little-endian word pairs)
        :raises PymcuprogError: if any word is not ACKed by the device
        """
        self.logger.debug("ST16 to *ptr++")
        self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC |
                            constants.UPDI_DATA_16, data[0], data[1]])
        response = self.updi_phy.receive(1)

        if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
            raise PymcuprogError("ACK error with st_ptr_inc16")

        # Each subsequent word is ACKed individually
        num = 2
        while num < len(data):
            self.updi_phy.send([data[num], data[num + 1]])
            response = self.updi_phy.receive(1)

            if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
                raise PymcuprogError("Error with st_ptr_inc16")
            num += 2

    def st_ptr_inc16_RSD(self, data, blocksize):
        """
        Store a 16-bit word value to the pointer location with pointer post-increment,
        with Response Signature Disable (RSD) active so the device sends no ACKs.

        :param data: data to store
        :param blocksize: max number of bytes sent per chunk, None for all at once.
        Warning: This does not strictly honor blocksize for values < 6.
        We always glob together the STCS(RSD) and REP commands, but this should
        pose no compatibility problems - if a serial adapter could not handle
        6-byte chunks, none of pymcuprog would work!
        """
        self.logger.debug("ST16 to *ptr++ with RSD, data length: 0x%03X in blocks of: %d", len(data), blocksize)

        # for performance we glob everything together into one USB transfer....
        repnumber = ((len(data) >> 1) - 1)
        # append STCS(CTRLA, 0x06) so RSD is switched off again after the data
        data = [*data, *[constants.UPDI_PHY_SYNC, constants.UPDI_STCS | constants.UPDI_CS_CTRLA, 0x06]]

        if blocksize is None:
            # Send whole thing at once stcs + repeat + st + (data + stcs)
            blocksize = 3 + 3 + 2 + len(data)
        num = 0
        firstpacket = []
        if blocksize < 10:
            # very small block size - we send pair of 2-byte commands first.
            # 0x0E on CTRLA = RSD enabled (presumably RSD bit plus the 0x06
            # guard-time bits - confirm against UPDI spec)
            firstpacket = [*[constants.UPDI_PHY_SYNC, constants.UPDI_STCS | constants.UPDI_CS_CTRLA, 0x0E],
                           *[constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE, (repnumber & 0xFF)]]
            data = [*[constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_16], *data]
            num = 0
        else:
            firstpacket = [*[constants.UPDI_PHY_SYNC, constants.UPDI_STCS | constants.UPDI_CS_CTRLA, 0x0E],
                           *[constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE, (repnumber & 0xFF)],
                           *[constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_16],
                           *data[:blocksize - 8]]
            num = blocksize - 8
            if len(firstpacket) == 64 and blocksize != 64:
                # Workaround for a bug in the ATSAMD11C-based serial adapter
                # firmware (mattairtech USB core) which chokes on USB blocks of
                # exactly 64 bytes: split such a packet into 32-byte halves.
                # Only triggers in corner cases (e.g. last page/chunk lengths
                # that land on exactly 64 bytes including the framing); the
                # cost is 1-4 ms per affected chunk, which is negligible.
                # A user-specified blocksize of exactly 64 opts out. -Spence 6/29/21
                firstpacket = firstpacket[:32]
                num = 32 - 8
        self.updi_phy.send(firstpacket)
        # if finite block size, this is used.
        while num < len(data):
            data_slice = data[num:num + blocksize]
            if len(data_slice) == 64 and blocksize != 64:
                data_slice = data[num:num + 32]  # workaround as above.
            self.updi_phy.send(data_slice)
            num += len(data_slice)

    def repeat(self, repeats):
        """
        Store a value to the repeat counter

        :param repeats: number of repeats requested
        :raises PymcuprogError: if the count exceeds UPDI_MAX_REPEAT_SIZE
        """
        self.logger.debug("Repeat %d", repeats)
        if (repeats - 1) > constants.UPDI_MAX_REPEAT_SIZE:
            self.logger.error("Invalid repeat count of %d", repeats)
            # Raise the package error type for consistency with the rest of
            # this class (was a bare Exception)
            raise PymcuprogError("Invalid repeat count!")
        repeats -= 1
        self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE,
                            repeats & 0xFF])

    def read_sib(self):
        """
        Read the SIB (System Information Block)
        """
        return self.updi_phy.sib()

    def key(self, size, key):
        """
        Write a key

        :param size: size of key (0=64B, 1=128B, 2=256B)
        :param key: key value
        :raises PymcuprogError: if the key length does not match the size code
        """
        self.logger.debug("Writing key")
        if len(key) != 8 << size:
            raise PymcuprogError("Invalid KEY length!")
        self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_KEY | constants.UPDI_KEY_KEY | size])
        # Key bytes are transmitted in reverse order
        self.updi_phy.send(list(reversed(list(key))))

    def _st_data_phase(self, values):
        """
        Performs data phase of transaction:
        receive ACK
        send data
        receive ACK

        :param values: bytearray of value(s) to send
        :raises PymcuprogError: if either ACK is missing or malformed
        """
        response = self.updi_phy.receive(1)
        if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
            raise PymcuprogError("Error with st")

        self.updi_phy.send(values)
        response = self.updi_phy.receive(1)
        if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
            raise PymcuprogError("Error with st")
|
||||
|
||||
|
||||
class UpdiDatalink16bit(UpdiDatalink):
    """
    UPDI data link layer in 16-bit version
    This means that all addresses and pointers contain 2 bytes
    (sent low byte first on the wire)
    """

    def __init__(self):
        UpdiDatalink.__init__(self)
        self.logger = getLogger(__name__)

    # pylint: disable=invalid-name
    def ld(self, address):
        """
        Load a single byte direct from a 16-bit address

        :param address: address to load from
        :return: value read
        """
        self.logger.info("LD from 0x{0:06X}".format(address))
        # Frame: SYNC, LDS opcode (16-bit address / 8-bit data), address LSB-first
        self.updi_phy.send(
            [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8,
             address & 0xFF, (address >> 8) & 0xFF])
        return self.updi_phy.receive(1)[0]

    def ld16(self, address):
        """
        Load a 16-bit word directly from a 16-bit address

        :param address: address to load from
        :return: values read (2 bytes)
        """
        self.logger.info("LD from 0x{0:06X}".format(address))
        self.updi_phy.send(
            [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16,
             address & 0xFF, (address >> 8) & 0xFF])
        return self.updi_phy.receive(2)

    # pylint: disable=invalid-name
    def st(self, address, value):
        """
        Store a single byte value directly to a 16-bit address

        :param address: address to write to
        :param value: value to write (only the low 8 bits are sent)
        """
        self.logger.info("ST to 0x{0:06X}".format(address))
        self.updi_phy.send(
            [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8,
             address & 0xFF, (address >> 8) & 0xFF])
        return self._st_data_phase([value & 0xFF])

    def st16(self, address, value):
        """
        Store a 16-bit word value directly to a 16-bit address

        :param address: address to write to
        :param value: value to write (sent LSB-first)
        """
        self.logger.info("ST to 0x{0:06X}".format(address))
        self.updi_phy.send(
            [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16,
             address & 0xFF, (address >> 8) & 0xFF])
        return self._st_data_phase([value & 0xFF, (value >> 8) & 0xFF])

    def st_ptr(self, address):
        """
        Set the pointer location

        :param address: address to write
        :raises PymcuprogError: if the device does not ACK the pointer write
        """
        self.logger.info("ST to ptr")
        self.updi_phy.send(
            [constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_16,
             address & 0xFF, (address >> 8) & 0xFF])
        response = self.updi_phy.receive(1)
        if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
            raise PymcuprogError("Error with st_ptr")
|
||||
|
||||
|
||||
class UpdiDatalink24bit(UpdiDatalink):
    """
    UPDI data link layer in 24-bit version
    This means that all addresses and pointers contain 3 bytes
    (sent low byte first on the wire)
    """

    def __init__(self):
        UpdiDatalink.__init__(self)
        self.logger = getLogger(__name__)

    # pylint: disable=invalid-name
    def ld(self, address):
        """
        Load a single byte direct from a 24-bit address

        :param address: address to load from
        :return: value read
        """
        self.logger.info("LD from 0x{0:06X}".format(address))
        # Frame: SYNC, LDS opcode (24-bit address / 8-bit data), address LSB-first
        self.updi_phy.send(
            [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8,
             address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
        return self.updi_phy.receive(1)[0]

    def ld16(self, address):
        """
        Load a 16-bit word directly from a 24-bit address

        :param address: address to load from
        :return: values read (2 bytes)
        """
        self.logger.info("LD from 0x{0:06X}".format(address))
        self.updi_phy.send(
            [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16,
             address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
        return self.updi_phy.receive(2)

    # pylint: disable=invalid-name
    def st(self, address, value):
        """
        Store a single byte value directly to a 24-bit address

        :param address: address to write to
        :param value: value to write (only the low 8 bits are sent)
        """
        self.logger.info("ST to 0x{0:06X}".format(address))
        self.updi_phy.send(
            [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8,
             address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
        return self._st_data_phase([value & 0xFF])

    def st16(self, address, value):
        """
        Store a 16-bit word value directly to a 24-bit address

        :param address: address to write to
        :param value: value to write (sent LSB-first)
        """
        self.logger.info("ST to 0x{0:06X}".format(address))
        self.updi_phy.send(
            [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16,
             address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
        return self._st_data_phase([value & 0xFF, (value >> 8) & 0xFF])

    def st_ptr(self, address):
        """
        Set the pointer location

        :param address: address to write
        :raises PymcuprogError: if the device does not ACK the pointer write
        """
        self.logger.info("ST to ptr")
        self.updi_phy.send(
            [constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_24,
             address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
        response = self.updi_phy.receive(1)
        if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
            raise PymcuprogError("Error with st_ptr")
|
||||
# ==== end of link.py ====
# ==== new file: software/tools/pymcuprog/libs/pymcuprog/serialupdi/nvm.py (322 lines) ====
|
||||
"""
|
||||
NVM implementations on various UPDI device families
|
||||
"""
|
||||
from logging import getLogger
|
||||
from pymcuprog.pymcuprog_errors import PymcuprogError
|
||||
from . import constants
|
||||
from .timeout import Timeout
|
||||
from time import sleep
|
||||
|
||||
class NvmUpdi(object):
    """
    Base class for NVM

    Concrete subclasses implement the version-specific NVMCTRL command
    sequences; this base provides the shared status polling helpers.
    """

    def __init__(self, readwrite, device):
        # readwrite: UPDI read/write application layer (read_byte/write_byte/...)
        # device: device model providing nvmctrl_address
        self.logger = getLogger(__name__)
        self.readwrite = readwrite
        self.device = device

    def chip_erase(self):
        """
        Does a chip erase using the NVM controller

        :raises NotImplementedError: must be implemented by a subclass
        """
        raise NotImplementedError("NVM stack not ready")

    def write_flash(self, address, data):
        """
        Writes data to flash

        :param address: address to write to
        :param data: data to write
        :raises NotImplementedError: must be implemented by a subclass
        """
        raise NotImplementedError("NVM stack not ready")

    def write_eeprom(self, address, data):
        """
        Write data to EEPROM

        :param address: address to write to
        :param data: data to write
        :raises NotImplementedError: must be implemented by a subclass
        """
        raise NotImplementedError("NVM stack not ready")

    def write_fuse(self, address, data):
        """
        Writes one fuse value

        :param address: address to write to
        :param data: data to write
        :raises NotImplementedError: must be implemented by a subclass
        """
        raise NotImplementedError("NVM stack not ready")

    def wait_flash_ready(self):
        """
        Waits for the NVM controller to be ready

        Polls NVMCTRL STATUS until neither the flash-busy nor eeprom-busy
        bit is set, or a write error is flagged, or the timeout expires.

        :return: True when ready; False on write error or timeout
        """
        timeout = Timeout(10000)  # 10 sec timeout, just to be sure

        self.logger.debug("Wait flash ready")
        while not timeout.expired():
            status = self.readwrite.read_byte(self.device.nvmctrl_address + constants.UPDI_NVMCTRL_STATUS)
            if status & (1 << constants.UPDI_NVM_STATUS_WRITE_ERROR):
                self.logger.error("NVM error")
                return False

            if not status & ((1 << constants.UPDI_NVM_STATUS_EEPROM_BUSY) |
                             (1 << constants.UPDI_NVM_STATUS_FLASH_BUSY)):
                return True

        self.logger.error("Wait flash ready timed out")
        return False

    def execute_nvm_command(self, command):
        """
        Executes an NVM COMMAND on the NVM CTRL

        :param command: command to execute (written to NVMCTRL CTRLA)
        """
        self.logger.debug("NVMCMD %d executing", command)
        return self.readwrite.write_byte(self.device.nvmctrl_address + constants.UPDI_NVMCTRL_CTRLA, command)
|
||||
|
||||
|
||||
class NvmUpdiTinyMega(NvmUpdi):
    """
    AKA Version 0 UPDI NVM
    Present on, for example, tiny817 -> mega4809
    """

    def __init__(self, readwrite, device):
        NvmUpdi.__init__(self, readwrite, device)
        self.logger = getLogger(__name__)

    def chip_erase(self):
        """
        Does a chip erase using the NVM controller

        Note that on locked devices this is not possible
        and the ERASE KEY has to be used instead, see the unlock method

        :return: True on success
        :raises IOError: on timeout waiting for the NVM controller
        NOTE(review): this method raises IOError while the rest of the class
        raises PymcuprogError - consider unifying (left as-is since callers
        may catch IOError specifically).
        """
        self.logger.info("Chip erase using NVM CTRL")

        # Wait until NVM CTRL is ready to erase
        if not self.wait_flash_ready():
            raise IOError("Timeout waiting for flash ready before erase ")

        # Erase
        self.execute_nvm_command(constants.UPDI_V0_NVMCTRL_CTRLA_CHIP_ERASE)

        # And wait for it
        if not self.wait_flash_ready():
            raise IOError("Timeout waiting for flash ready after erase")

        return True

    def write_flash(self, address, data, blocksize=2, bulkwrite=0, pagewrite_delay=0):
        """
        Writes data to flash (v0)

        :param address: address to write to
        :param data: data to write
        :param blocksize: write chunk size passed to the link layer
        :param bulkwrite: 0 = standalone write, 1 = part of a whole-flash write
        :param pagewrite_delay: (ms) delay after each page-write command
        """
        return self.write_nvm(address, data, use_word_access=True, blocksize=blocksize, bulkwrite=bulkwrite, pagewrite_delay=pagewrite_delay)

    def write_eeprom(self, address, data):
        """
        Write data to EEPROM (v0)

        Uses byte access and the combined erase/write-page command.

        :param address: address to write to
        :param data: data to write
        """
        return self.write_nvm(address, data, use_word_access=False,
                              nvmcommand=constants.UPDI_V0_NVMCTRL_CTRLA_ERASE_WRITE_PAGE)

    def write_fuse(self, address, data):
        """
        Writes one fuse value (v0)

        v0 parts write fuses through the dedicated NVMCTRL ADDR/DATA registers.

        :param address: address to write to
        :param data: data to write (only data[0] is used)
        :raises PymcuprogError: on timeout waiting for the NVM controller
        """

        # Check that NVM controller is ready
        if not self.wait_flash_ready():
            raise PymcuprogError("Timeout waiting for flash ready before page buffer clear ")

        # Write address to NVMCTRL ADDR
        self.logger.debug("Load NVM address")
        self.readwrite.write_byte(self.device.nvmctrl_address + constants.UPDI_NVMCTRL_ADDRL, address & 0xFF)
        self.readwrite.write_byte(self.device.nvmctrl_address + constants.UPDI_NVMCTRL_ADDRH, (address >> 8) & 0xFF)

        # Write data
        self.logger.debug("Load fuse data")
        self.readwrite.write_byte(self.device.nvmctrl_address + constants.UPDI_NVMCTRL_DATAL, data[0] & 0xFF)

        # Execute
        self.logger.debug("Execute fuse write")
        self.execute_nvm_command(constants.UPDI_V0_NVMCTRL_CTRLA_WRITE_FUSE)

        if not self.wait_flash_ready():
            raise PymcuprogError("Timeout waiting for flash ready before page buffer clear ")

    def write_nvm(self, address, data, use_word_access, nvmcommand=constants.UPDI_V0_NVMCTRL_CTRLA_WRITE_PAGE,
                  blocksize=2, bulkwrite=0, pagewrite_delay=0):
        """
        Writes a page of data to NVM (v0)

        By default the PAGE_WRITE command is used, which
        requires that the page is already erased.
        By default word access is used (flash)

        :param address: address to write to
        :param data: data to write
        :param use_word_access: write whole words?
        :param nvmcommand: command to use for commit
        :param blocksize: write chunk size passed to the link layer
        :param bulkwrite: Passed down from nvmserialupdi. 0 = normal or single write.
                          1 means it's part of writing the whole flash.
                          In that case we only check/clear status at the start of the area.
        :param pagewrite_delay: (ms) delay before pagewrite
        :raises PymcuprogError: on timeout waiting for the NVM controller
        """

        # unless we are in a bulk (whole flash) write, in which case we skip almost everything.
        # 0x8000 / 0x4000 are flash base addresses (mapped view), i.e. the first
        # page of a bulk write - NOTE(review): confirm these match all supported parts.
        if (bulkwrite == 0 ) or address == 0x8000 or address == 0x4000 or not use_word_access:
            # Check that NVM controller is ready.
            # This check costs roughly half a second per upload (USB latency,
            # any baud rate, ~256 pages) which is why bulk writes skip it.
            if not self.wait_flash_ready():
                raise PymcuprogError("Timeout waiting for flash ready before page buffer clear ")
            # Clear the page buffer
            self.logger.debug("Clear page buffer")
            self.execute_nvm_command(constants.UPDI_V0_NVMCTRL_CTRLA_PAGE_BUFFER_CLR)

            # Wait for NVM controller to be ready
            if not self.wait_flash_ready():
                raise PymcuprogError("Timeout waiting for flash ready after page buffer clear")

        # Load the page buffer by writing directly to location
        if use_word_access:
            self.readwrite.write_data_words(address, data, blocksize)
        else:
            self.readwrite.write_data(address, data)

        # Write the page to NVM, maybe erase first
        self.logger.debug("Committing data")

        self.execute_nvm_command(nvmcommand)

        if pagewrite_delay > 0:
            sleep(pagewrite_delay/1000.0)
        # SACRIFICES SPEED FOR COMPATIBILITY - the sleep above only runs when the
        # --pagepause command line parameter is 1 or more (default 0). Granularity
        # is coarse enough that 1 ms vs 5 ms makes no measurable difference.
        # It is a ~2:1 performance hit on CH340 adapters with some parts, but too
        # many adapters break without it. Should only ever apply to tinyAVR/megaAVR,
        # never Dx-series parts.
        if not bulkwrite == 1:
            # do a final NVM status check only if not doing a bulk write, or after the last chunk (when bulkwrite = 2)
            # not doing this every page made uploads about 15% faster
            if not self.wait_flash_ready():
                raise PymcuprogError("Timeout waiting for flash ready after page write ")
|
||||
|
||||
|
||||
class NvmUpdiAvrDx(NvmUpdi):
    """
    AKA Version 1 UPDI NVM
    Present on, for example, AVR-DA and newer

    NOTE(review): this class raises bare Exception while the v0 class raises
    PymcuprogError/IOError - consider unifying the exception types.
    """

    def __init__(self, readwrite, device):
        NvmUpdi.__init__(self, readwrite, device)
        self.logger = getLogger(__name__)

    def chip_erase(self):
        """
        Does a chip erase using the NVM controller
        Note that on locked devices this it not possible
        and the ERASE KEY has to be used instead

        :return: True on success
        :raises Exception: on timeout waiting for the NVM controller
        """
        self.logger.info("Chip erase using NVM CTRL")

        # Wait until NVM CTRL is ready to erase
        if not self.wait_flash_ready():
            raise Exception("Timeout waiting for flash ready before erase ")

        # Erase
        self.execute_nvm_command(constants.UPDI_V1_NVMCTRL_CTRLA_CHIP_ERASE)

        # And wait for it
        if not self.wait_flash_ready():
            raise Exception("Timeout waiting for flash ready after erase")

        return True

    def write_flash(self, address, data, blocksize=2, bulkwrite=0, pagewrite_delay=0):
        """
        Writes data to flash (v1)

        :param address: address to write to
        :param data: data to write
        :param blocksize: write chunk size passed to the link layer
        :param bulkwrite: 0 = standalone write, 1 = part of a whole-flash write
        :param pagewrite_delay: (ms) delay parameter (unused on v1 parts)
        :return:
        """
        return self.write_nvm(address, data, use_word_access=True, blocksize=blocksize, bulkwrite=bulkwrite, pagewrite_delay=pagewrite_delay)

    def write_eeprom(self, address, data):
        """
        Writes data to NVM (EEPROM)

        :param address: address to write to
        :param data: data to write
        :raises Exception: on timeout waiting for the NVM controller
        """
        nvm_command = constants.UPDI_V1_NVMCTRL_CTRLA_EEPROM_ERASE_WRITE

        # Check that NVM controller is ready
        if not self.wait_flash_ready():
            raise Exception("Timeout waiting for NVM ready before command write")

        # Write the command to the NVM controller
        self.logger.info("NVM EEPROM erase/write command")
        self.execute_nvm_command(nvm_command)

        # Write the data
        self.readwrite.write_data(address, data)

        # Wait for NVM controller to be ready again
        if not self.wait_flash_ready():
            raise Exception("Timeout waiting for NVM ready after data write")

        # Remove command from NVM controller
        self.logger.info("Clear NVM command")
        self.execute_nvm_command(constants.UPDI_V1_NVMCTRL_CTRLA_NOCMD)

    def write_fuse(self, address, data):
        """
        Writes one fuse value
        V1 fuses are EEPROM-based

        :param address: address to write to
        :param data: data to write
        """
        return self.write_eeprom(address, data)

    def write_nvm(self, address, data, use_word_access, blocksize=2, bulkwrite=0, pagewrite_delay=0):
        """
        Writes data to NVM (version 1)
        This version of the NVM block has no page buffer, so words are written directly.

        :param address: address to write to
        :param data: data to write
        :param use_word_access: write in whole words?
        :param blocksize: write chunk size passed to the link layer
        :param bulkwrite: 0 = standalone write; 1 = part of a whole-flash write,
                          where status checks are only done at the area start
        :param pagewrite_delay: unused on v1 parts
        :raises Exception: on timeout waiting for the NVM controller
        """
        nvm_command = constants.UPDI_V1_NVMCTRL_CTRLA_FLASH_WRITE

        # 0x800000 is the flash base address on Dx-series (first page of a bulk
        # write) - NOTE(review): confirm against the device memory map.
        if bulkwrite == 0 or address == 0x800000:
            # Check that NVM controller is ready
            if not self.wait_flash_ready():
                raise Exception("Timeout waiting for flash ready before page buffer clear ")

            # Write the command to the NVM controller
            self.logger.info("NVM write command")
            self.execute_nvm_command(nvm_command)

        # Write the data
        if use_word_access:
            self.readwrite.write_data_words(address, data, blocksize)
        else:
            self.readwrite.write_data(address, data)

        # Wait for NVM controller to be ready again
        if bulkwrite != 1:
            if not self.wait_flash_ready():
                raise Exception("Timeout waiting for flash ready after data write")

            # Remove command from NVM controller
            self.logger.info("Clear NVM command")
            self.execute_nvm_command(constants.UPDI_V1_NVMCTRL_CTRLA_NOCMD)
|
||||
# ==== end of nvm.py ====
# ==== new file: software/tools/pymcuprog/libs/pymcuprog/serialupdi/physical.py (150 lines) ====
|
||||
"""
|
||||
Serial driver for UPDI stack
|
||||
"""
|
||||
import time
|
||||
from logging import getLogger
|
||||
import serial
|
||||
from serial.serialutil import SerialException
|
||||
|
||||
from . import constants
|
||||
|
||||
|
||||
class UpdiPhysical:
    """
    PDI physical driver using a given serial port at a given baud
    """

    def __init__(self, port, baud=115200):
        """
        Initialise the serial port

        :param port: serial port name to open (e.g. 'COM3' or '/dev/ttyUSB0')
        :param baud: initial baud rate
        """
        self.logger = getLogger(__name__)

        # Inter-byte delay
        self.ibdly = 0.0001
        self.port = port
        self.baud = baud
        # Set before open so __del__ can safely test it even if open fails
        self.ser = None

        self.initialise_serial(self.port, self.baud)

        # send an initial break as handshake
        self.send([constants.UPDI_BREAK])

    def change_baud(self, newbaud):
        # Re-program the baud rate on the already-open port
        self.ser.baudrate = newbaud

    def initialise_serial(self, port, baud):
        """
        Standard serial port initialisation

        :param port: serial port to use
        :param baud: baud rate
        """
        self.logger.info("Opening port '%s' at '%d' baud", port, baud)
        try:
            # Construct with port=None so the port is NOT opened yet; this
            # lets DTR/RTS be forced low before open() to avoid glitching
            # the target. UPDI framing is 8E2 (even parity, two stop bits).
            self.ser = serial.Serial(None, baud, parity=serial.PARITY_EVEN, timeout=1, stopbits=serial.STOPBITS_TWO)
            self.ser.port = port
            self.ser.dtr = False
            self.ser.rts = False
            self.ser.open()
        except SerialException:
            self.logger.error("Unable to open serial port '%s'", port)
            raise

    def _loginfo(self, msg, data):
        # Debug helper: log a byte sequence as a list of hex values.
        # Accepts either a str sequence (py2-style) or ints.
        if data and isinstance(data[0], str):
            i_data = [ord(x) for x in data]
        else:
            i_data = data
        data_str = "[" + ", ".join([hex(x) for x in i_data]) + "]"
        self.logger.debug("%s : %s", msg, data_str)

    def send_double_break(self):
        """
        Sends a double break to reset the UPDI port

        BREAK is actually just a slower zero frame
        A double break is guaranteed to push the UPDI state
        machine into a known state, albeit rather brutally
        """

        self.logger.info("Sending double break")

        # Re-init at a lower baud
        # At 300 bauds, the break character will pull the line low for 30ms
        # Which is slightly above the recommended 24.6ms
        self.ser.close()
        temporary_serial = serial.Serial(None, 300, parity=serial.PARITY_EVEN, timeout=1,
                                         stopbits=serial.STOPBITS_ONE)
        temporary_serial.port = self.port
        temporary_serial.dtr = False
        temporary_serial.rts = False
        temporary_serial.open()

        # Send two break characters, with 1 stop bit in between
        temporary_serial.write([constants.UPDI_BREAK])

        # Wait for the double break end
        # (the single-wire line echoes the frame back; read() also paces us)
        temporary_serial.read(1)

        time.sleep(0.1)

        # Send two break characters, with 1 stop bit in between
        temporary_serial.write([constants.UPDI_BREAK])

        # Wait for the double break end
        temporary_serial.read(1)

        # Re-init at the real baud
        temporary_serial.close()
        self.initialise_serial(self.port, self.baud)

    def send(self, command):
        """
        Sends a char array to UPDI with NO inter-byte delay
        Note that the byte will echo back

        :param command: bytes to send
        """
        self.logger.info("send %d bytes", len(command))
        self._loginfo("data: ", command)

        self.ser.write(command)
        # it will echo back: UPDI is a single-wire interface, so every byte
        # sent appears on RX as well — read and discard the echo.
        echo = self.ser.read(len(command))

    def receive(self, size):
        """
        Receives a frame of a known number of chars from UPDI

        :param size: bytes to receive
        :return: bytearray of received bytes (may be short on timeout)
        """
        response = bytearray()
        timeout = 1

        # For each byte
        while size and timeout:

            # Read
            character = self.ser.read()

            # Anything in?
            if character:
                response.append(ord(character))
                size -= 1
            else:
                # One serial-level timeout (1s) exhausts the retry budget
                timeout -= 1

        self._loginfo("receive", response)
        return response

    def sib(self):
        """
        System information block is just a string coming back from a SIB command

        :return: raw SIB bytes as read up to the newline
        """
        self.send([
            constants.UPDI_PHY_SYNC,
            constants.UPDI_KEY | constants.UPDI_KEY_SIB | constants.UPDI_SIB_32BYTES])
        return self.ser.readline()

    def __del__(self):
        # Best-effort cleanup; self.ser is None if open never succeeded
        if self.ser:
            self.logger.info("Closing port '%s'", self.port)
            self.ser.close()
|
||||
160
software/tools/pymcuprog/libs/pymcuprog/serialupdi/readwrite.py
Normal file
160
software/tools/pymcuprog/libs/pymcuprog/serialupdi/readwrite.py
Normal file
@@ -0,0 +1,160 @@
|
||||
"""
|
||||
Read/write access provider for UPDI
|
||||
"""
|
||||
from logging import getLogger
|
||||
from pymcuprog.pymcuprog_errors import PymcuprogError
|
||||
from . import constants
|
||||
|
||||
|
||||
class UpdiReadWrite(object):
    """
    Provides various forms of reads and writes for UPDI applications.
    Makes use of the datalink layer provided at construction.
    """

    def __init__(self, datalink):
        self.logger = getLogger(__name__)
        # Datalink layer that performs the actual UPDI transactions
        self.datalink = datalink

    def read_cs(self, address):
        """
        Read from Control/Status space

        :param address: address (index) to read
        :return: value read
        """
        return self.datalink.ldcs(address)

    def write_cs(self, address, value):
        """
        Write to Control/Status space

        :param address: address (index) to write
        :param value: 8-bit value to write
        """
        return self.datalink.stcs(address, value)

    def write_key(self, size, key):
        """
        Write a KEY into UPDI

        :param size: size of key to send
        :param key: key value
        """
        return self.datalink.key(size, key)

    def read_sib(self):
        """
        Read the SIB from UPDI

        :return: SIB string (bytearray) read
        """
        return self.datalink.read_sib()

    def read_byte(self, address):
        """
        Read a single byte from UPDI

        :param address: address to read from
        :return: value read
        """
        return self.datalink.ld(address)

    def write_byte(self, address, value):
        """
        Writes a single byte to UPDI

        :param address: address to write to
        :param value: value to write
        """
        return self.datalink.st(address, value)

    def read_data(self, address, size):
        """
        Reads a number of bytes of data from UPDI

        :param address: address to read from
        :param size: number of bytes to read
        :raises PymcuprogError: if size exceeds the UPDI repeat limit
        """
        self.logger.debug("Reading %d bytes from 0x%04X", size, address)
        # Range check: a single REPEAT transfer is capped by the protocol
        if size > constants.UPDI_MAX_REPEAT_SIZE:
            raise PymcuprogError("Cant read that many bytes in one go")

        # Store the address in the UPDI pointer register
        self.datalink.st_ptr(address)

        # Fire up the repeat
        if size > 1:
            self.datalink.repeat(size)

        # Do the read(s) with pointer post-increment
        return self.datalink.ld_ptr_inc(size)

    def read_data_words(self, address, words):
        """
        Reads a number of words of data from UPDI

        :param address: address to read from
        :param words: number of words to read
        :raises PymcuprogError: if words exceeds the UPDI repeat limit
        """
        self.logger.debug("Reading %d words from 0x%04X", words, address)

        # Special case for a single word - so we can optimize ld_ptr_inc16
        # for >1 word to improve performance.
        # BUGFIX: the original called self.datalink.ld16(self, address),
        # passing self as a spurious extra positional argument, which
        # raised TypeError for every single-word read.
        # (Checked before the range check; a 1-word read can never exceed it.)
        if words == 1:
            return self.datalink.ld16(address)

        # Range check: a single REPEAT transfer is capped by the protocol
        if words > constants.UPDI_MAX_REPEAT_SIZE:
            raise PymcuprogError("Cant read that many words in one go")

        # Otherwise, store the address in the UPDI pointer register
        self.datalink.st_ptr(address)

        # For performance, managing repeat count is done in ld_ptr_inc16()
        return self.datalink.ld_ptr_inc16(words)

    def write_data_words(self, address, data, blocksize):
        """
        Writes a number of words to memory

        :param address: address to write to
        :param data: data to write (byte list, little-endian word pairs)
        :param blocksize: max number of bytes being sent in one serial transfer
        :raises PymcuprogError: if data exceeds the UPDI word-repeat limit
        """
        # Special-case of 1 word: assemble little-endian and use a single ST16
        if len(data) == 2:
            value = data[0] + (data[1] << 8)
            return self.datalink.st16(address, value)

        # Range check (word repeat allows twice the byte limit)
        if len(data) > constants.UPDI_MAX_REPEAT_SIZE << 1:
            raise PymcuprogError("Invalid length")

        # Store the address in the UPDI pointer register
        self.datalink.st_ptr(address)

        # For performance, we want to do this with Response Signature Disable set,
        # otherwise the USB Serial latency kills you. EVERYTHING (repeat setup and
        # RSD enable/disable) is handled inside st_ptr_inc16_RSD() so that, unless
        # blocksize precludes it, the whole thing goes to the serial adapter in a
        # single transfer.
        return self.datalink.st_ptr_inc16_RSD(data, blocksize)

    def write_data(self, address, data):
        """
        Writes a number of bytes to memory

        :param address: address to write to
        :param data: data to write
        :raises PymcuprogError: if data exceeds the UPDI repeat limit
        """
        # Special case of 1 byte
        if len(data) == 1:
            return self.datalink.st(address, data[0])
        # Special case of 2 bytes: two direct stores beat pointer setup
        if len(data) == 2:
            self.datalink.st(address, data[0])
            return self.datalink.st(address + 1, data[1])

        # Range check
        if len(data) > constants.UPDI_MAX_REPEAT_SIZE:
            raise PymcuprogError("Invalid length")

        # Store the address
        self.datalink.st_ptr(address)

        # Fire up the repeat and write with pointer post-increment
        self.datalink.repeat(len(data))
        return self.datalink.st_ptr_inc(data)
|
||||
@@ -0,0 +1,25 @@
|
||||
"""
|
||||
Simple timer helper for UPDI stack
|
||||
"""
|
||||
import time
|
||||
|
||||
#pylint: disable=too-few-public-methods
|
||||
class Timeout:
    """
    Simple timeout helper in milliseconds.
    """

    def __init__(self, timeout_ms):
        """
        Start the expired counter instantly

        :param timeout_ms: milliseconds to count
        """

        self.timeout_ms = timeout_ms
        # Use a monotonic clock: time.time() is wall-clock and can jump
        # (NTP steps, manual adjustment), making a timeout fire early or
        # never. time.monotonic() only ever moves forward.
        self.start_time = time.monotonic()

    def expired(self):
        """
        Check if the timeout has expired

        :return: True if more than timeout_ms has elapsed since construction
        """
        return time.monotonic() - self.start_time > self.timeout_ms / 1000.0
|
||||
36
software/tools/pymcuprog/libs/pymcuprog/toolconnection.py
Normal file
36
software/tools/pymcuprog/libs/pymcuprog/toolconnection.py
Normal file
@@ -0,0 +1,36 @@
|
||||
"""
|
||||
This module includes wrapper classes for Tool connection parameters
|
||||
"""
|
||||
|
||||
#pylint: disable=too-few-public-methods
class ToolConnection(object):
    """
    Common base for classes bundling configuration parameters for tool connections
    """


#pylint: disable=too-few-public-methods
class ToolUsbHidConnection(ToolConnection):
    """
    Connection parameters for a USB HID tool (debugger)
    """
    # Class-level defaults (instances always shadow these in __init__)
    serialnumber = None
    tool_name = None

    def __init__(self, serialnumber=None, tool_name=None):
        """
        :param tool_name: Tool name as given in USB Product string. Some shortnames are also supported
            as defined in pyedbglib.hidtransport.toolinfo.py. Set to None if don't care
        :param serialnumber: USB serial number string. Set to None if don't care
        """
        self.serialnumber = serialnumber
        self.tool_name = tool_name


#pylint: disable=too-few-public-methods
class ToolSerialConnection(ToolConnection):
    """
    Connection parameters for a tool reached through a serial port
    """
    # Class-level default (instances always shadow this in __init__)
    serialport = None

    def __init__(self, serialport="COM1"):
        self.serialport = serialport
|
||||
275
software/tools/pymcuprog/libs/pymcuprog/utils.py
Normal file
275
software/tools/pymcuprog/libs/pymcuprog/utils.py
Normal file
@@ -0,0 +1,275 @@
|
||||
"""
|
||||
Utility functions for pymcuprog
|
||||
"""
|
||||
# Python 3 compatibility for Python 2
|
||||
from __future__ import print_function
|
||||
|
||||
from pyedbglib.protocols.housekeepingprotocol import Jtagice3HousekeepingProtocol
|
||||
from pyedbglib.protocols.jtagice3protocol import Jtagice3ResponseError
|
||||
from pyedbglib.protocols.jtagice3protocol import Jtagice3Protocol
|
||||
|
||||
from .pymcuprog_errors import PymcuprogNotSupportedError
|
||||
|
||||
def read_tool_info(housekeeper):
    """
    Interrogates tool (debugger) for useful info

    :param housekeeper: instance of pyedbglib.protocols.housekeepingprotocol.Jtagice3HousekeepingProtocol
    :returns: Dictionary with various info about the connected debugger
    """
    dap_info = housekeeper.dap_info()

    # Add alias for serialnumber(==serial)
    dap_info['serialnumber'] = dap_info['serial']

    # Read FW versions
    dap_info['firmware_major'] = housekeeper.get_byte(Jtagice3HousekeepingProtocol.HOUSEKEEPING_CONTEXT_CONFIG,
                                                      Jtagice3HousekeepingProtocol.HOUSEKEEPING_CONFIG_FWREV_MAJ)
    dap_info['firmware_minor'] = housekeeper.get_byte(Jtagice3HousekeepingProtocol.HOUSEKEEPING_CONTEXT_CONFIG,
                                                      Jtagice3HousekeepingProtocol.HOUSEKEEPING_CONFIG_FWREV_MIN)
    dap_info['build'] = housekeeper.get_le16(Jtagice3HousekeepingProtocol.HOUSEKEEPING_CONTEXT_CONFIG,
                                             Jtagice3HousekeepingProtocol.HOUSEKEEPING_CONFIG_BUILD)

    # Read HW revision
    dap_info['hardware_rev'] = housekeeper.get_byte(Jtagice3HousekeepingProtocol.HOUSEKEEPING_CONTEXT_CONFIG,
                                                    Jtagice3HousekeepingProtocol.HOUSEKEEPING_CONFIG_HWREV)

    # Some EDBG versions do NOT have the dap_info 'device' tag populated for non-ARM parts.
    # Sneak in and collect the data from the EDBG config instead
    if dap_info['product'][:4] == 'EDBG' and dap_info['device_name'] == '':
        try:
            # Hand-built raw EDBG config-read command; byte layout below is
            # taken from what this code sends — presumably matches the EDBG
            # vendor protocol (TODO confirm against EDBG documentation).
            # Vendor command
            cmd = bytearray([0x83])
            # Add cnt of '1' element:
            cmd.append(1)
            # Add tag of 'TARGET DEVICE NAME'
            cmd.append(0x04)
            # Add dummy 'param'
            cmd.append(ord('?'))
            # Add the offset (little-endian 16-bit)
            offset = 0
            cmd.extend([offset & 0xFF, offset >> 8])

            # Add the chunk size (little-endian 16-bit)
            numbytes = 32
            cmd.extend([numbytes & 0xFF, numbytes >> 8])

            # raw command routed via the HK interface
            response = housekeeper.dap_command_response(cmd)
            # Payload starts at byte 6; device name is NUL-terminated
            dap_info['device_name'] = response[6:6 + numbytes].split(b'\0')[0].decode()
        except: #pylint: disable=bare-except
            # Deliberately best-effort: on any failure, resort to ''
            pass
    return dap_info
|
||||
|
||||
def print_tool_info(info):
    """
    Print out various tool information

    :param info: Dictionary with various tool info as returned from read_tool_info()
    """
    # Assemble the report first, then emit line by line (output is
    # byte-identical to printing each line directly)
    report = [
        "Connected to {0:s} from {1:s} (serial number {2:s})".format(info['product'], info['vendor'],
                                                                     info['serial']),
        "Debugger firmware version {0:d}.{1:d}.{2:d}".format(info['firmware_major'],
                                                             info['firmware_minor'],
                                                             info['build']),
        "Debugger hardware revision {0:d}".format(info['hardware_rev']),
    ]
    for line in report:
        print(line)
|
||||
|
||||
def read_target_voltage(housekeeper):
    """
    Read target voltage (VTref) from the tool

    :param housekeeper: instance of pyedbglib.protocols.housekeepingprotocol.Jtagice3HousekeepingProtocol
    :return: voltage in Volts
    """
    parameter = Jtagice3HousekeepingProtocol.HOUSEKEEPING_ANALOG_VTREF
    return read_voltage_parameter(housekeeper, parameter)
|
||||
|
||||
def read_supply_voltage_setpoint(housekeeper):
    """
    Read the target-supply voltage setpoint from the tool

    :param housekeeper: instance of pyedbglib.protocols.housekeepingprotocol.Jtagice3HousekeepingProtocol
    :return: setpoint in Volts
    """
    parameter = Jtagice3HousekeepingProtocol.HOUSEKEEPING_TSUP_VOLTAGE
    return read_voltage_parameter(housekeeper, parameter)
|
||||
|
||||
def read_usb_voltage(housekeeper):
    """
    Read the tool's USB supply voltage

    :param housekeeper: instance of pyedbglib.protocols.housekeepingprotocol.Jtagice3HousekeepingProtocol
    :return: voltage in Volts
    """
    parameter = Jtagice3HousekeepingProtocol.HOUSEKEEPING_ANALOG_VUSB
    return read_voltage_parameter(housekeeper, parameter)
|
||||
|
||||
def read_voltage_parameter(housekeeper, offset):
    """
    Generic read of a voltage parameter from the tool

    :param housekeeper: Instance of pyedbglib.protocols.housekeepingprotocol.Jtagice3HousekeepingProtocol
    :param offset: Tool parameter offset to read
    :return: voltage in Volts
    """
    housekeeper.start_session()
    # The tool reports the parameter in millivolts
    millivolts = housekeeper.get_le16(Jtagice3HousekeepingProtocol.HOUSEKEEPING_CONTEXT_ANALOG, offset)
    volts = millivolts / 1000.0
    housekeeper.end_session()
    return volts
|
||||
|
||||
def set_supply_voltage_setpoint(housekeeper, voltage):
    """
    Set supply setpoint

    :param housekeeper: Instance of pyedbglib.protocols.housekeepingprotocol.Jtagice3HousekeepingProtocol
    :param voltage: New setpoint for target supply, in Volts
    :raises PymcuprogNotSupportedError: if the tool has no supply-voltage capability
    :raises ValueError: if the requested voltage is out of range
    """
    # Probe read: a tool without supply capability rejects this parameter,
    # which is how capability is detected here
    try:
        housekeeper.get_le16(Jtagice3HousekeepingProtocol.HOUSEKEEPING_CONTEXT_ANALOG,
                             Jtagice3HousekeepingProtocol.HOUSEKEEPING_TSUP_VOLTAGE)
    except Jtagice3ResponseError:
        raise PymcuprogNotSupportedError("Connected debugger/board does not have supply voltage capability.")

    # Tool expects millivolts; int() truncates toward zero
    setpoint_mv = int(voltage*1000)

    try:
        housekeeper.set_le16(Jtagice3HousekeepingProtocol.HOUSEKEEPING_CONTEXT_ANALOG,
                             Jtagice3HousekeepingProtocol.HOUSEKEEPING_TSUP_VOLTAGE, setpoint_mv)
    # Unfortunately pyedbglib only throws a generic Exception in this case so no specific Exceptions can be caught
    # See DSG-1494
    #pylint: disable=broad-except
    except Exception as error:
        # Out-of-range is detected by matching the failure code in the message text
        if "failure code 0x{:02x}".format(Jtagice3Protocol.SETGET_FAILURE_INVALID_VALUE) in str(error).lower():
            raise ValueError("Specified voltage out of range!")
        # Voltage was within range but something else went wrong. Just forward the exception.
        raise
|
||||
|
||||
def compare(data0, data1, offset, verify_mask=None):
    """
    Compares the two byte arrays

    :param data0: first array for compare
    :param data1: second array for compare
    :param offset: address offset in the memory area, for printing
    :param verify_mask: compare mask (for varying instruction width)
    :raises ValueError: on length mismatch or content mismatch
    :return: None when the arrays compare equal under the mask
    """
    if verify_mask is None:
        verify_mask = [0xFF]

    # Check first that lengths match
    if len(data0) != len(data1):
        raise ValueError("Length mismatch on verify, expect 0x{:04X} but got 0x{:04X}".format(len(data0),len(data1)))

    mask_len = len(verify_mask)

    for i in range(0, len(data0), mask_len):
        for dat in range(0, mask_len):
            # Guard the tail: when len(data0) is not a multiple of mask_len
            # the original code indexed past the end and raised IndexError
            if i + dat >= len(data0):
                break
            if (data0[i+dat] & verify_mask[dat]) != (data1[i+dat] & verify_mask[dat]):
                raise ValueError("Verify mismatch starting at location 0x{:06X}: 0x{:02X} vs 0x{:02X}".
                                 format(i+dat+offset, data0[i+dat] & verify_mask[dat], data1[i+dat] & verify_mask[dat]))
|
||||
|
||||
|
||||
def showdata(data, address=0, page_size=None, line_wrap=16):
    """
    Show (print) the data

    :param data: an array/list of data to show
    :param address: byte address to data
    :param page_size: page size in bytes
    :param line_wrap: how many bytes to print per line
    """

    # Cannot print more per line than the page size
    if page_size is not None:
        if line_wrap > page_size:
            line_wrap = page_size

    # Horizontal rule sized to the line: 9 chars of address prefix plus
    # 3 chars ("XX ") per byte
    print("-"*(line_wrap*3+9))

    # Page alignment: print full lines of "xx" placeholders for the part of
    # the page that precedes the data
    rows = 0
    if page_size is not None:
        page = address % page_size
        rows = int(page / line_wrap)
        for row in range(rows):
            print("0x{0:06X}: ".format(address-page+row*line_wrap), end='')
            print("xx "*line_wrap, end='')
            print("")

    # Calculate offset from aligned data
    div = address % line_wrap

    print("0x{0:06X}: ".format(address-div), end='')
    # Add some empty bytes
    print("xx "*div, end='')

    # keep track of page wraps
    wrap = False

    # Enumeration starts at div+1 so that column math includes the
    # placeholder bytes printed above
    for i, value in enumerate(data, div+1):
        print("{0:02X} ".format(value), end='')
        if page_size is not None:
            # Extra blank line at a page boundary (unless it's the very end)
            if (i+(rows*line_wrap)) % page_size == 0 and i != len(data)+div:
                print("")
                wrap = True
        # New line + address prefix at line-wrap boundaries (or forced by a
        # page wrap above); suppressed at the very last byte
        if i % line_wrap == 0 and i != len(data)+div or wrap:
            print("")
            print("0x{0:06X}: ".format(address-div + i), end='')
            wrap = False

    # Figure out how many extra empty data positions to print
    extra = line_wrap - div - (len(data) % line_wrap)
    if extra % line_wrap == 0:
        extra = 0

    print("xx "*extra, end='')
    print("")
    print("-"*(line_wrap*3+9))
|
||||
|
||||
|
||||
def pagealign(data, address, page_size, data_size=1):
    """
    Aligns data to the start of a page by pre-padding with 0xFF

    The padding is applied in place, and the (possibly padded) data is
    returned together with the page-aligned address.

    :param data: mutable byte sequence (list or bytearray)
    :param address: original start address of the data
    :param page_size: page size in bytes
    :param data_size: post-pad so the total length is a multiple of this
    :return: tuple of (padded data, aligned address)
    """
    # Pre-pad the data if it does not start at the start of a page
    offset = address % page_size
    data[:0] = [0xFF] * offset
    # In case of other data sizes, post-pad the data
    while len(data) % data_size:
        data.append(0xFF)

    return data, address - offset
|
||||
|
||||
def pad_to_size(memory_block, chunk_size, pad_value):
    """
    Pads a chunk of memory in place until its length is a multiple of chunk_size

    :param memory_block: mutable byte sequence (list or bytearray)
    :param chunk_size: target length multiple
    :param pad_value: value appended as padding
    """
    remainder = len(memory_block) % chunk_size
    if remainder:
        memory_block.extend([pad_value] * (chunk_size - remainder))
|
||||
|
||||
|
||||
def enum(**enums):
    """
    Emulates an Enum type

    Needed for Python 2.7 compatibility as Python did not get built-in support for enums until version 3.4

    :return: a new class named 'Enum' whose attributes are the given keyword arguments
    """
    members = dict(enums)
    return type('Enum', (), members)
|
||||
|
||||
|
||||
def verify_flash_from_bin(bin_filename, backend, offset=0, max_read_chunk=None):
    """
    Verify the contents of flash against a bin-file

    :param bin_filename: Name/path of bin-file to verify
    :param backend: Reference to the Backend class of pymcuprog
    :param offset: Memory offset to start verify from
    :param max_read_chunk: Maximum number of bytes to read at a time (None lets the backend decide)
    :returns: Boolean value indicating success or failure of the operation
    """
    # Use a context manager so the handle is always closed (the original
    # leaked it), and read the binary content in one go instead of
    # line-wise readlines() on binary data — same bytes, simpler.
    with open(bin_filename, 'rb') as bin_file:
        bin_data = bytearray(bin_file.read())

    verify_status = backend.verify_memory(bin_data, 'flash', offset, max_read_chunk=max_read_chunk)
    # Preserve original semantics: only an identity-False result fails
    return verify_status is not False
|
||||
4
software/tools/pymcuprog/libs/pymcuprog/version.py
Normal file
4
software/tools/pymcuprog/libs/pymcuprog/version.py
Normal file
@@ -0,0 +1,4 @@
|
||||
""" This file was generated when pymcuprog was built """
|
||||
VERSION = '3.6.4.86'
|
||||
COMMIT_ID = '6224623cfc85c5e7cb6b1df8419b72c338c223f9'
|
||||
BUILD_DATE = '2020-11-09 14:49:15 +0000'
|
||||
89
software/tools/pymcuprog/libs/serial/__init__.py
Normal file
89
software/tools/pymcuprog/libs/serial/__init__.py
Normal file
@@ -0,0 +1,89 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# This is a wrapper module for different platform implementations
|
||||
#
|
||||
# This file is part of pySerial. https://github.com/pyserial/pyserial
|
||||
# (C) 2001-2017 Chris Liechti <cliechti@gmx.net>
|
||||
#
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
import sys
|
||||
import importlib
|
||||
|
||||
from serial.serialutil import *
|
||||
#~ SerialBase, SerialException, to_bytes, iterbytes
|
||||
|
||||
__version__ = '3.4'
|
||||
|
||||
VERSION = __version__
|
||||
|
||||
# pylint: disable=wrong-import-position
|
||||
if sys.platform == 'cli':
|
||||
from serial.serialcli import Serial
|
||||
else:
|
||||
import os
|
||||
# chose an implementation, depending on os
|
||||
if os.name == 'nt': # sys.platform == 'win32':
|
||||
from serial.serialwin32 import Serial
|
||||
elif os.name == 'posix':
|
||||
from serial.serialposix import Serial, PosixPollSerial, VTIMESerial # noqa
|
||||
elif os.name == 'java':
|
||||
from serial.serialjava import Serial
|
||||
else:
|
||||
raise ImportError("Sorry: no implementation for your platform ('{}') available".format(os.name))
|
||||
|
||||
|
||||
protocol_handler_packages = [
|
||||
'serial.urlhandler',
|
||||
]
|
||||
|
||||
|
||||
def serial_for_url(url, *args, **kwargs):
    """\
    Get an instance of the Serial class, depending on port/url. The port is not
    opened when the keyword parameter 'do_not_open' is true, by default it
    is. All other parameters are directly passed to the __init__ method when
    the port is instantiated.

    The list of package names that is searched for protocol handlers is kept in
    ``protocol_handler_packages``.

    e.g. we want to support a URL ``foobar://``. A module
    ``my_handlers.protocol_foobar`` is provided by the user. Then
    ``protocol_handler_packages.append("my_handlers")`` would extend the search
    path so that ``serial_for_url("foobar://"))`` would work.
    """
    # check and remove extra parameter to not confuse the Serial class
    do_open = not kwargs.pop('do_not_open', False)
    # the default is to use the native implementation
    klass = Serial
    try:
        url_lowercase = url.lower()
    except AttributeError:
        # it's not a string, use default
        pass
    else:
        # if it is an URL, try to import the handler module from the list of possible packages
        if '://' in url_lowercase:
            protocol = url_lowercase.split('://', 1)[0]
            module_name = '.protocol_{}'.format(protocol)
            for package_name in protocol_handler_packages:
                try:
                    # Import the package first so relative import below can work
                    importlib.import_module(package_name)
                    handler_module = importlib.import_module(module_name, package_name)
                except ImportError:
                    # This package has no handler for the protocol; try the next
                    continue
                else:
                    # A handler module may customize both the URL and the class,
                    # or just provide a Serial replacement
                    if hasattr(handler_module, 'serial_class_for_url'):
                        url, klass = handler_module.serial_class_for_url(url)
                    else:
                        klass = handler_module.Serial
                    break
            else:
                # for/else: no package provided a handler for this protocol
                raise ValueError('invalid URL, protocol {!r} not known'.format(protocol))
    # instantiate and open when desired (port=None defers the open)
    instance = klass(None, *args, **kwargs)
    instance.port = url
    if do_open:
        instance.open()
    return instance
|
||||
115
software/tools/pymcuprog/libs/serial/aio.py
Normal file
115
software/tools/pymcuprog/libs/serial/aio.py
Normal file
@@ -0,0 +1,115 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Python Serial Port Extension for Win32, Linux, BSD, Jython
|
||||
# module for serial IO for POSIX compatible systems, like Linux
|
||||
# see __init__.py
|
||||
#
|
||||
# (C) 2015 Chris Liechti <cliechti@gmx.net>
|
||||
#
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
"""\
|
||||
Support asyncio with serial ports. EXPERIMENTAL
|
||||
|
||||
Posix platforms only, Python 3.4+ only.
|
||||
|
||||
Windows event loops can not wait for serial ports with the current
|
||||
implementation. It should be possible to get that working though.
|
||||
"""
|
||||
import asyncio
|
||||
import serial
|
||||
import logger
|
||||
|
||||
|
||||
class SerialTransport(asyncio.Transport):
    """
    asyncio Transport backed by an already-constructed serial port instance.

    The port is switched to non-blocking mode and its file descriptor is
    watched by the event loop; received bytes are delivered to the protocol
    via data_received().
    """

    def __init__(self, loop, protocol, serial_instance):
        self._loop = loop
        self._protocol = protocol
        self.serial = serial_instance
        self._closing = False
        self._paused = False
        # XXX how to support url handlers too
        # Zero timeout + nonblocking so reads never stall the event loop
        self.serial.timeout = 0
        self.serial.nonblocking()
        loop.call_soon(protocol.connection_made, self)
        # only start reading when connection_made() has been called
        loop.call_soon(loop.add_reader, self.serial.fd, self._read_ready)

    def __repr__(self):
        return '{self.__class__.__name__}({self._loop}, {self._protocol}, {self.serial})'.format(self=self)

    def close(self):
        """Stop reading, close the port and notify the protocol (idempotent)."""
        if self._closing:
            return
        self._closing = True
        self._loop.remove_reader(self.serial.fd)
        self.serial.close()
        self._loop.call_soon(self._protocol.connection_lost, None)

    def _read_ready(self):
        # Called by the event loop whenever the serial fd is readable
        data = self.serial.read(1024)
        if data:
            self._protocol.data_received(data)

    def write(self, data):
        self.serial.write(data)

    def can_write_eof(self):
        # Serial links have no EOF concept
        return False

    def pause_reading(self):
        if self._closing:
            raise RuntimeError('Cannot pause_reading() when closing')
        if self._paused:
            raise RuntimeError('Already paused')
        self._paused = True
        # BUGFIX: was self._loop.remove_reader(self._sock_fd) — an attribute
        # copied from the socket transport that does not exist here, so
        # pausing always raised AttributeError. Use the serial fd instead.
        self._loop.remove_reader(self.serial.fd)
        if self._loop.get_debug():
            logger.debug("%r pauses reading", self)

    def resume_reading(self):
        if not self._paused:
            raise RuntimeError('Not paused')
        self._paused = False
        if self._closing:
            return
        # BUGFIX: was self._sock_fd; see pause_reading()
        self._loop.add_reader(self.serial.fd, self._read_ready)
        if self._loop.get_debug():
            logger.debug("%r resumes reading", self)

    # NOTE(review): the `logger` used in the debug branches relies on the
    # module-level `import logger`, which looks like it was meant to be the
    # logging module — only reached when the loop runs in debug mode; confirm.
||||
|
||||
|
||||
async def create_serial_connection(loop, protocol_factory, *args, **kwargs):
    """
    Open a serial port and wire it to a new protocol instance.

    Converted from the @asyncio.coroutine generator style (the decorator was
    deprecated in Python 3.8 and removed in 3.11) to a native coroutine;
    callers that ``await`` it or pass it to ``run_until_complete()`` are
    unaffected.

    :param loop: event loop the transport will use
    :param protocol_factory: zero-argument callable returning an asyncio.Protocol
    :returns: (transport, protocol) tuple
    """
    ser = serial.Serial(*args, **kwargs)
    protocol = protocol_factory()
    transport = SerialTransport(loop, protocol, ser)
    return (transport, protocol)
|
||||
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
# test
|
||||
if __name__ == '__main__':
    # Manual smoke test: open /dev/ttyUSB0, send a greeting, close the
    # transport on the first reply, then stop the loop.
    class Output(asyncio.Protocol):
        def connection_made(self, transport):
            self.transport = transport
            print('port opened', transport)
            # De-assert RTS before sending
            transport.serial.rts = False
            transport.write(b'hello world\n')

        def data_received(self, data):
            print('data received', repr(data))
            # One reply is enough for the demo — hang up
            self.transport.close()

        def connection_lost(self, exc):
            print('port closed')
            asyncio.get_event_loop().stop()

    loop = asyncio.get_event_loop()
    coro = create_serial_connection(loop, Output, '/dev/ttyUSB0', baudrate=115200)
    loop.run_until_complete(coro)
    # Serve reads/writes until connection_lost() stops the loop
    loop.run_forever()
    loop.close()
||||
1346
software/tools/pymcuprog/libs/serial/rfc2217.py
Normal file
1346
software/tools/pymcuprog/libs/serial/rfc2217.py
Normal file
File diff suppressed because it is too large
Load Diff
92
software/tools/pymcuprog/libs/serial/rs485.py
Normal file
92
software/tools/pymcuprog/libs/serial/rs485.py
Normal file
@@ -0,0 +1,92 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# RS485 support
|
||||
#
|
||||
# This file is part of pySerial. https://github.com/pyserial/pyserial
|
||||
# (C) 2015 Chris Liechti <cliechti@gmx.net>
|
||||
#
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
"""\
|
||||
The settings for RS485 are stored in a dedicated object that can be applied to
|
||||
serial ports (where supported).
|
||||
NOTE: Some implementations may only support a subset of the settings.
|
||||
"""
|
||||
|
||||
import time
|
||||
import serial
|
||||
|
||||
|
||||
class RS485Settings(object):
    """Container for the RS485 mode settings applied to a serial port."""

    def __init__(
            self,
            rts_level_for_tx=True,
            rts_level_for_rx=False,
            loopback=False,
            delay_before_tx=None,
            delay_before_rx=None):
        # RTS logic level while transmitting / while receiving
        self.rts_level_for_tx = rts_level_for_tx
        self.rts_level_for_rx = rts_level_for_rx
        # Loopback is a hint; some native implementations honor it
        self.loopback = loopback
        # Optional delays in seconds, applied around a transmission
        self.delay_before_tx = delay_before_tx
        self.delay_before_rx = delay_before_rx
|
||||
|
||||
|
||||
class RS485(serial.Serial):
|
||||
"""\
|
||||
A subclass that replaces the write method with one that toggles RTS
|
||||
according to the RS485 settings.
|
||||
|
||||
NOTE: This may work unreliably on some serial ports (control signals not
|
||||
synchronized or delayed compared to data). Using delays may be
|
||||
unreliable (varying times, larger than expected) as the OS may not
|
||||
support very fine grained delays (no smaller than in the order of
|
||||
tens of milliseconds).
|
||||
|
||||
NOTE: Some implementations support this natively. Better performance
|
||||
can be expected when the native version is used.
|
||||
|
||||
NOTE: The loopback property is ignored by this implementation. The actual
|
||||
behavior depends on the used hardware.
|
||||
|
||||
Usage:
|
||||
|
||||
ser = RS485(...)
|
||||
ser.rs485_mode = RS485Settings(...)
|
||||
ser.write(b'hello')
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(RS485, self).__init__(*args, **kwargs)
|
||||
self._alternate_rs485_settings = None
|
||||
|
||||
def write(self, b):
|
||||
"""Write to port, controlling RTS before and after transmitting."""
|
||||
if self._alternate_rs485_settings is not None:
|
||||
# apply level for TX and optional delay
|
||||
self.setRTS(self._alternate_rs485_settings.rts_level_for_tx)
|
||||
if self._alternate_rs485_settings.delay_before_tx is not None:
|
||||
time.sleep(self._alternate_rs485_settings.delay_before_tx)
|
||||
# write and wait for data to be written
|
||||
super(RS485, self).write(b)
|
||||
super(RS485, self).flush()
|
||||
# optional delay and apply level for RX
|
||||
if self._alternate_rs485_settings.delay_before_rx is not None:
|
||||
time.sleep(self._alternate_rs485_settings.delay_before_rx)
|
||||
self.setRTS(self._alternate_rs485_settings.rts_level_for_rx)
|
||||
else:
|
||||
super(RS485, self).write(b)
|
||||
|
||||
# redirect where the property stores the settings so that underlying Serial
|
||||
# instance does not see them
|
||||
@property
|
||||
def rs485_mode(self):
|
||||
"""\
|
||||
Enable RS485 mode and apply new settings, set to None to disable.
|
||||
See serial.rs485.RS485Settings for more info about the value.
|
||||
"""
|
||||
return self._alternate_rs485_settings
|
||||
|
||||
@rs485_mode.setter
|
||||
def rs485_mode(self, rs485_settings):
|
||||
self._alternate_rs485_settings = rs485_settings
|
||||
251
software/tools/pymcuprog/libs/serial/serialcli.py
Normal file
251
software/tools/pymcuprog/libs/serial/serialcli.py
Normal file
@@ -0,0 +1,251 @@
|
||||
#! python
|
||||
#
|
||||
# Backend for .NET/Mono (IronPython), .NET >= 2
|
||||
#
|
||||
# This file is part of pySerial. https://github.com/pyserial/pyserial
|
||||
# (C) 2008-2015 Chris Liechti <cliechti@gmx.net>
|
||||
#
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
import System
|
||||
import System.IO.Ports
|
||||
from serial.serialutil import *
|
||||
|
||||
# must invoke function with byte array, make a helper to convert strings
|
||||
# to byte arrays
|
||||
sab = System.Array[System.Byte]
|
||||
|
||||
|
||||
def as_byte_array(string):
|
||||
return sab([ord(x) for x in string]) # XXX will require adaption when run with a 3.x compatible IronPython
|
||||
|
||||
|
||||
class Serial(SerialBase):
|
||||
"""Serial port implementation for .NET/Mono."""
|
||||
|
||||
BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
|
||||
9600, 19200, 38400, 57600, 115200)
|
||||
|
||||
def open(self):
|
||||
"""\
|
||||
Open port with current settings. This may throw a SerialException
|
||||
if the port cannot be opened.
|
||||
"""
|
||||
if self._port is None:
|
||||
raise SerialException("Port must be configured before it can be used.")
|
||||
if self.is_open:
|
||||
raise SerialException("Port is already open.")
|
||||
try:
|
||||
self._port_handle = System.IO.Ports.SerialPort(self.portstr)
|
||||
except Exception as msg:
|
||||
self._port_handle = None
|
||||
raise SerialException("could not open port %s: %s" % (self.portstr, msg))
|
||||
|
||||
# if RTS and/or DTR are not set before open, they default to True
|
||||
if self._rts_state is None:
|
||||
self._rts_state = True
|
||||
if self._dtr_state is None:
|
||||
self._dtr_state = True
|
||||
|
||||
self._reconfigure_port()
|
||||
self._port_handle.Open()
|
||||
self.is_open = True
|
||||
if not self._dsrdtr:
|
||||
self._update_dtr_state()
|
||||
if not self._rtscts:
|
||||
self._update_rts_state()
|
||||
self.reset_input_buffer()
|
||||
|
||||
def _reconfigure_port(self):
|
||||
"""Set communication parameters on opened port."""
|
||||
if not self._port_handle:
|
||||
raise SerialException("Can only operate on a valid port handle")
|
||||
|
||||
#~ self._port_handle.ReceivedBytesThreshold = 1
|
||||
|
||||
if self._timeout is None:
|
||||
self._port_handle.ReadTimeout = System.IO.Ports.SerialPort.InfiniteTimeout
|
||||
else:
|
||||
self._port_handle.ReadTimeout = int(self._timeout * 1000)
|
||||
|
||||
# if self._timeout != 0 and self._interCharTimeout is not None:
|
||||
# timeouts = (int(self._interCharTimeout * 1000),) + timeouts[1:]
|
||||
|
||||
if self._write_timeout is None:
|
||||
self._port_handle.WriteTimeout = System.IO.Ports.SerialPort.InfiniteTimeout
|
||||
else:
|
||||
self._port_handle.WriteTimeout = int(self._write_timeout * 1000)
|
||||
|
||||
# Setup the connection info.
|
||||
try:
|
||||
self._port_handle.BaudRate = self._baudrate
|
||||
except IOError as e:
|
||||
# catch errors from illegal baudrate settings
|
||||
raise ValueError(str(e))
|
||||
|
||||
if self._bytesize == FIVEBITS:
|
||||
self._port_handle.DataBits = 5
|
||||
elif self._bytesize == SIXBITS:
|
||||
self._port_handle.DataBits = 6
|
||||
elif self._bytesize == SEVENBITS:
|
||||
self._port_handle.DataBits = 7
|
||||
elif self._bytesize == EIGHTBITS:
|
||||
self._port_handle.DataBits = 8
|
||||
else:
|
||||
raise ValueError("Unsupported number of data bits: %r" % self._bytesize)
|
||||
|
||||
if self._parity == PARITY_NONE:
|
||||
self._port_handle.Parity = getattr(System.IO.Ports.Parity, 'None') # reserved keyword in Py3k
|
||||
elif self._parity == PARITY_EVEN:
|
||||
self._port_handle.Parity = System.IO.Ports.Parity.Even
|
||||
elif self._parity == PARITY_ODD:
|
||||
self._port_handle.Parity = System.IO.Ports.Parity.Odd
|
||||
elif self._parity == PARITY_MARK:
|
||||
self._port_handle.Parity = System.IO.Ports.Parity.Mark
|
||||
elif self._parity == PARITY_SPACE:
|
||||
self._port_handle.Parity = System.IO.Ports.Parity.Space
|
||||
else:
|
||||
raise ValueError("Unsupported parity mode: %r" % self._parity)
|
||||
|
||||
if self._stopbits == STOPBITS_ONE:
|
||||
self._port_handle.StopBits = System.IO.Ports.StopBits.One
|
||||
elif self._stopbits == STOPBITS_ONE_POINT_FIVE:
|
||||
self._port_handle.StopBits = System.IO.Ports.StopBits.OnePointFive
|
||||
elif self._stopbits == STOPBITS_TWO:
|
||||
self._port_handle.StopBits = System.IO.Ports.StopBits.Two
|
||||
else:
|
||||
raise ValueError("Unsupported number of stop bits: %r" % self._stopbits)
|
||||
|
||||
if self._rtscts and self._xonxoff:
|
||||
self._port_handle.Handshake = System.IO.Ports.Handshake.RequestToSendXOnXOff
|
||||
elif self._rtscts:
|
||||
self._port_handle.Handshake = System.IO.Ports.Handshake.RequestToSend
|
||||
elif self._xonxoff:
|
||||
self._port_handle.Handshake = System.IO.Ports.Handshake.XOnXOff
|
||||
else:
|
||||
self._port_handle.Handshake = getattr(System.IO.Ports.Handshake, 'None') # reserved keyword in Py3k
|
||||
|
||||
#~ def __del__(self):
|
||||
#~ self.close()
|
||||
|
||||
def close(self):
|
||||
"""Close port"""
|
||||
if self.is_open:
|
||||
if self._port_handle:
|
||||
try:
|
||||
self._port_handle.Close()
|
||||
except System.IO.Ports.InvalidOperationException:
|
||||
# ignore errors. can happen for unplugged USB serial devices
|
||||
pass
|
||||
self._port_handle = None
|
||||
self.is_open = False
|
||||
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
|
||||
@property
|
||||
def in_waiting(self):
|
||||
"""Return the number of characters currently in the input buffer."""
|
||||
if not self.is_open:
|
||||
raise portNotOpenError
|
||||
return self._port_handle.BytesToRead
|
||||
|
||||
def read(self, size=1):
|
||||
"""\
|
||||
Read size bytes from the serial port. If a timeout is set it may
|
||||
return less characters as requested. With no timeout it will block
|
||||
until the requested number of bytes is read.
|
||||
"""
|
||||
if not self.is_open:
|
||||
raise portNotOpenError
|
||||
# must use single byte reads as this is the only way to read
|
||||
# without applying encodings
|
||||
data = bytearray()
|
||||
while size:
|
||||
try:
|
||||
data.append(self._port_handle.ReadByte())
|
||||
except System.TimeoutException:
|
||||
break
|
||||
else:
|
||||
size -= 1
|
||||
return bytes(data)
|
||||
|
||||
def write(self, data):
|
||||
"""Output the given string over the serial port."""
|
||||
if not self.is_open:
|
||||
raise portNotOpenError
|
||||
#~ if not isinstance(data, (bytes, bytearray)):
|
||||
#~ raise TypeError('expected %s or bytearray, got %s' % (bytes, type(data)))
|
||||
try:
|
||||
# must call overloaded method with byte array argument
|
||||
# as this is the only one not applying encodings
|
||||
self._port_handle.Write(as_byte_array(data), 0, len(data))
|
||||
except System.TimeoutException:
|
||||
raise writeTimeoutError
|
||||
return len(data)
|
||||
|
||||
def reset_input_buffer(self):
|
||||
"""Clear input buffer, discarding all that is in the buffer."""
|
||||
if not self.is_open:
|
||||
raise portNotOpenError
|
||||
self._port_handle.DiscardInBuffer()
|
||||
|
||||
def reset_output_buffer(self):
|
||||
"""\
|
||||
Clear output buffer, aborting the current output and
|
||||
discarding all that is in the buffer.
|
||||
"""
|
||||
if not self.is_open:
|
||||
raise portNotOpenError
|
||||
self._port_handle.DiscardOutBuffer()
|
||||
|
||||
def _update_break_state(self):
|
||||
"""
|
||||
Set break: Controls TXD. When active, to transmitting is possible.
|
||||
"""
|
||||
if not self.is_open:
|
||||
raise portNotOpenError
|
||||
self._port_handle.BreakState = bool(self._break_state)
|
||||
|
||||
def _update_rts_state(self):
|
||||
"""Set terminal status line: Request To Send"""
|
||||
if not self.is_open:
|
||||
raise portNotOpenError
|
||||
self._port_handle.RtsEnable = bool(self._rts_state)
|
||||
|
||||
def _update_dtr_state(self):
|
||||
"""Set terminal status line: Data Terminal Ready"""
|
||||
if not self.is_open:
|
||||
raise portNotOpenError
|
||||
self._port_handle.DtrEnable = bool(self._dtr_state)
|
||||
|
||||
@property
|
||||
def cts(self):
|
||||
"""Read terminal status line: Clear To Send"""
|
||||
if not self.is_open:
|
||||
raise portNotOpenError
|
||||
return self._port_handle.CtsHolding
|
||||
|
||||
@property
|
||||
def dsr(self):
|
||||
"""Read terminal status line: Data Set Ready"""
|
||||
if not self.is_open:
|
||||
raise portNotOpenError
|
||||
return self._port_handle.DsrHolding
|
||||
|
||||
@property
|
||||
def ri(self):
|
||||
"""Read terminal status line: Ring Indicator"""
|
||||
if not self.is_open:
|
||||
raise portNotOpenError
|
||||
#~ return self._port_handle.XXX
|
||||
return False # XXX an error would be better
|
||||
|
||||
@property
|
||||
def cd(self):
|
||||
"""Read terminal status line: Carrier Detect"""
|
||||
if not self.is_open:
|
||||
raise portNotOpenError
|
||||
return self._port_handle.CDHolding
|
||||
|
||||
# - - platform specific - - - -
|
||||
# none
|
||||
249
software/tools/pymcuprog/libs/serial/serialjava.py
Normal file
249
software/tools/pymcuprog/libs/serial/serialjava.py
Normal file
@@ -0,0 +1,249 @@
|
||||
#!jython
|
||||
#
|
||||
# Backend Jython with JavaComm
|
||||
#
|
||||
# This file is part of pySerial. https://github.com/pyserial/pyserial
|
||||
# (C) 2002-2015 Chris Liechti <cliechti@gmx.net>
|
||||
#
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
from serial.serialutil import *
|
||||
|
||||
|
||||
def my_import(name):
|
||||
mod = __import__(name)
|
||||
components = name.split('.')
|
||||
for comp in components[1:]:
|
||||
mod = getattr(mod, comp)
|
||||
return mod
|
||||
|
||||
|
||||
def detect_java_comm(names):
|
||||
"""try given list of modules and return that imports"""
|
||||
for name in names:
|
||||
try:
|
||||
mod = my_import(name)
|
||||
mod.SerialPort
|
||||
return mod
|
||||
except (ImportError, AttributeError):
|
||||
pass
|
||||
raise ImportError("No Java Communications API implementation found")
|
||||
|
||||
|
||||
# Java Communications API implementations
|
||||
# http://mho.republika.pl/java/comm/
|
||||
|
||||
comm = detect_java_comm([
|
||||
'javax.comm', # Sun/IBM
|
||||
'gnu.io', # RXTX
|
||||
])
|
||||
|
||||
|
||||
def device(portnumber):
|
||||
"""Turn a port number into a device name"""
|
||||
enum = comm.CommPortIdentifier.getPortIdentifiers()
|
||||
ports = []
|
||||
while enum.hasMoreElements():
|
||||
el = enum.nextElement()
|
||||
if el.getPortType() == comm.CommPortIdentifier.PORT_SERIAL:
|
||||
ports.append(el)
|
||||
return ports[portnumber].getName()
|
||||
|
||||
|
||||
class Serial(SerialBase):
|
||||
"""\
|
||||
Serial port class, implemented with Java Communications API and
|
||||
thus usable with jython and the appropriate java extension.
|
||||
"""
|
||||
|
||||
def open(self):
|
||||
"""\
|
||||
Open port with current settings. This may throw a SerialException
|
||||
if the port cannot be opened.
|
||||
"""
|
||||
if self._port is None:
|
||||
raise SerialException("Port must be configured before it can be used.")
|
||||
if self.is_open:
|
||||
raise SerialException("Port is already open.")
|
||||
if type(self._port) == type(''): # strings are taken directly
|
||||
portId = comm.CommPortIdentifier.getPortIdentifier(self._port)
|
||||
else:
|
||||
portId = comm.CommPortIdentifier.getPortIdentifier(device(self._port)) # numbers are transformed to a comport id obj
|
||||
try:
|
||||
self.sPort = portId.open("python serial module", 10)
|
||||
except Exception as msg:
|
||||
self.sPort = None
|
||||
raise SerialException("Could not open port: %s" % msg)
|
||||
self._reconfigurePort()
|
||||
self._instream = self.sPort.getInputStream()
|
||||
self._outstream = self.sPort.getOutputStream()
|
||||
self.is_open = True
|
||||
|
||||
def _reconfigurePort(self):
|
||||
"""Set communication parameters on opened port."""
|
||||
if not self.sPort:
|
||||
raise SerialException("Can only operate on a valid port handle")
|
||||
|
||||
self.sPort.enableReceiveTimeout(30)
|
||||
if self._bytesize == FIVEBITS:
|
||||
jdatabits = comm.SerialPort.DATABITS_5
|
||||
elif self._bytesize == SIXBITS:
|
||||
jdatabits = comm.SerialPort.DATABITS_6
|
||||
elif self._bytesize == SEVENBITS:
|
||||
jdatabits = comm.SerialPort.DATABITS_7
|
||||
elif self._bytesize == EIGHTBITS:
|
||||
jdatabits = comm.SerialPort.DATABITS_8
|
||||
else:
|
||||
raise ValueError("unsupported bytesize: %r" % self._bytesize)
|
||||
|
||||
if self._stopbits == STOPBITS_ONE:
|
||||
jstopbits = comm.SerialPort.STOPBITS_1
|
||||
elif self._stopbits == STOPBITS_ONE_POINT_FIVE:
|
||||
jstopbits = comm.SerialPort.STOPBITS_1_5
|
||||
elif self._stopbits == STOPBITS_TWO:
|
||||
jstopbits = comm.SerialPort.STOPBITS_2
|
||||
else:
|
||||
raise ValueError("unsupported number of stopbits: %r" % self._stopbits)
|
||||
|
||||
if self._parity == PARITY_NONE:
|
||||
jparity = comm.SerialPort.PARITY_NONE
|
||||
elif self._parity == PARITY_EVEN:
|
||||
jparity = comm.SerialPort.PARITY_EVEN
|
||||
elif self._parity == PARITY_ODD:
|
||||
jparity = comm.SerialPort.PARITY_ODD
|
||||
elif self._parity == PARITY_MARK:
|
||||
jparity = comm.SerialPort.PARITY_MARK
|
||||
elif self._parity == PARITY_SPACE:
|
||||
jparity = comm.SerialPort.PARITY_SPACE
|
||||
else:
|
||||
raise ValueError("unsupported parity type: %r" % self._parity)
|
||||
|
||||
jflowin = jflowout = 0
|
||||
if self._rtscts:
|
||||
jflowin |= comm.SerialPort.FLOWCONTROL_RTSCTS_IN
|
||||
jflowout |= comm.SerialPort.FLOWCONTROL_RTSCTS_OUT
|
||||
if self._xonxoff:
|
||||
jflowin |= comm.SerialPort.FLOWCONTROL_XONXOFF_IN
|
||||
jflowout |= comm.SerialPort.FLOWCONTROL_XONXOFF_OUT
|
||||
|
||||
self.sPort.setSerialPortParams(self._baudrate, jdatabits, jstopbits, jparity)
|
||||
self.sPort.setFlowControlMode(jflowin | jflowout)
|
||||
|
||||
if self._timeout >= 0:
|
||||
self.sPort.enableReceiveTimeout(int(self._timeout*1000))
|
||||
else:
|
||||
self.sPort.disableReceiveTimeout()
|
||||
|
||||
def close(self):
|
||||
"""Close port"""
|
||||
if self.is_open:
|
||||
if self.sPort:
|
||||
self._instream.close()
|
||||
self._outstream.close()
|
||||
self.sPort.close()
|
||||
self.sPort = None
|
||||
self.is_open = False
|
||||
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
|
||||
@property
|
||||
def in_waiting(self):
|
||||
"""Return the number of characters currently in the input buffer."""
|
||||
if not self.sPort:
|
||||
raise portNotOpenError
|
||||
return self._instream.available()
|
||||
|
||||
def read(self, size=1):
|
||||
"""\
|
||||
Read size bytes from the serial port. If a timeout is set it may
|
||||
return less characters as requested. With no timeout it will block
|
||||
until the requested number of bytes is read.
|
||||
"""
|
||||
if not self.sPort:
|
||||
raise portNotOpenError
|
||||
read = bytearray()
|
||||
if size > 0:
|
||||
while len(read) < size:
|
||||
x = self._instream.read()
|
||||
if x == -1:
|
||||
if self.timeout >= 0:
|
||||
break
|
||||
else:
|
||||
read.append(x)
|
||||
return bytes(read)
|
||||
|
||||
def write(self, data):
|
||||
"""Output the given string over the serial port."""
|
||||
if not self.sPort:
|
||||
raise portNotOpenError
|
||||
if not isinstance(data, (bytes, bytearray)):
|
||||
raise TypeError('expected %s or bytearray, got %s' % (bytes, type(data)))
|
||||
self._outstream.write(data)
|
||||
return len(data)
|
||||
|
||||
def reset_input_buffer(self):
|
||||
"""Clear input buffer, discarding all that is in the buffer."""
|
||||
if not self.sPort:
|
||||
raise portNotOpenError
|
||||
self._instream.skip(self._instream.available())
|
||||
|
||||
def reset_output_buffer(self):
|
||||
"""\
|
||||
Clear output buffer, aborting the current output and
|
||||
discarding all that is in the buffer.
|
||||
"""
|
||||
if not self.sPort:
|
||||
raise portNotOpenError
|
||||
self._outstream.flush()
|
||||
|
||||
def send_break(self, duration=0.25):
|
||||
"""Send break condition. Timed, returns to idle state after given duration."""
|
||||
if not self.sPort:
|
||||
raise portNotOpenError
|
||||
self.sPort.sendBreak(duration*1000.0)
|
||||
|
||||
def _update_break_state(self):
|
||||
"""Set break: Controls TXD. When active, to transmitting is possible."""
|
||||
if self.fd is None:
|
||||
raise portNotOpenError
|
||||
raise SerialException("The _update_break_state function is not implemented in java.")
|
||||
|
||||
def _update_rts_state(self):
|
||||
"""Set terminal status line: Request To Send"""
|
||||
if not self.sPort:
|
||||
raise portNotOpenError
|
||||
self.sPort.setRTS(self._rts_state)
|
||||
|
||||
def _update_dtr_state(self):
|
||||
"""Set terminal status line: Data Terminal Ready"""
|
||||
if not self.sPort:
|
||||
raise portNotOpenError
|
||||
self.sPort.setDTR(self._dtr_state)
|
||||
|
||||
@property
|
||||
def cts(self):
|
||||
"""Read terminal status line: Clear To Send"""
|
||||
if not self.sPort:
|
||||
raise portNotOpenError
|
||||
self.sPort.isCTS()
|
||||
|
||||
@property
|
||||
def dsr(self):
|
||||
"""Read terminal status line: Data Set Ready"""
|
||||
if not self.sPort:
|
||||
raise portNotOpenError
|
||||
self.sPort.isDSR()
|
||||
|
||||
@property
|
||||
def ri(self):
|
||||
"""Read terminal status line: Ring Indicator"""
|
||||
if not self.sPort:
|
||||
raise portNotOpenError
|
||||
self.sPort.isRI()
|
||||
|
||||
@property
|
||||
def cd(self):
|
||||
"""Read terminal status line: Carrier Detect"""
|
||||
if not self.sPort:
|
||||
raise portNotOpenError
|
||||
self.sPort.isCD()
|
||||
811
software/tools/pymcuprog/libs/serial/serialposix.py
Normal file
811
software/tools/pymcuprog/libs/serial/serialposix.py
Normal file
@@ -0,0 +1,811 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# backend for serial IO for POSIX compatible systems, like Linux, OSX
|
||||
#
|
||||
# This file is part of pySerial. https://github.com/pyserial/pyserial
|
||||
# (C) 2001-2016 Chris Liechti <cliechti@gmx.net>
|
||||
#
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# parts based on code from Grant B. Edwards <grante@visi.com>:
|
||||
# ftp://ftp.visi.com/users/grante/python/PosixSerial.py
|
||||
#
|
||||
# references: http://www.easysw.com/~mike/serial/serial.html
|
||||
|
||||
# Collection of port names (was previously used by number_to_device which was
|
||||
# removed.
|
||||
# - Linux /dev/ttyS%d (confirmed)
|
||||
# - cygwin/win32 /dev/com%d (confirmed)
|
||||
# - openbsd (OpenBSD) /dev/cua%02d
|
||||
# - bsd*, freebsd* /dev/cuad%d
|
||||
# - darwin (OS X) /dev/cuad%d
|
||||
# - netbsd /dev/dty%02d (NetBSD 1.6 testing by Erk)
|
||||
# - irix (IRIX) /dev/ttyf%d (partially tested) names depending on flow control
|
||||
# - hp (HP-UX) /dev/tty%dp0 (not tested)
|
||||
# - sunos (Solaris/SunOS) /dev/tty%c (letters, 'a'..'z') (confirmed)
|
||||
# - aix (AIX) /dev/tty%d
|
||||
|
||||
|
||||
# pylint: disable=abstract-method
|
||||
import errno
|
||||
import fcntl
|
||||
import os
|
||||
import select
|
||||
import struct
|
||||
import sys
|
||||
import termios
|
||||
|
||||
import serial
|
||||
from serial.serialutil import SerialBase, SerialException, to_bytes, \
|
||||
portNotOpenError, writeTimeoutError, Timeout
|
||||
|
||||
|
||||
class PlatformSpecificBase(object):
|
||||
BAUDRATE_CONSTANTS = {}
|
||||
|
||||
def _set_special_baudrate(self, baudrate):
|
||||
raise NotImplementedError('non-standard baudrates are not supported on this platform')
|
||||
|
||||
def _set_rs485_mode(self, rs485_settings):
|
||||
raise NotImplementedError('RS485 not supported on this platform')
|
||||
|
||||
|
||||
# some systems support an extra flag to enable the two in POSIX unsupported
|
||||
# paritiy settings for MARK and SPACE
|
||||
CMSPAR = 0 # default, for unsupported platforms, override below
|
||||
|
||||
# try to detect the OS so that a device can be selected...
|
||||
# this code block should supply a device() and set_special_baudrate() function
|
||||
# for the platform
|
||||
plat = sys.platform.lower()
|
||||
|
||||
if plat[:5] == 'linux': # Linux (confirmed) # noqa
|
||||
import array
|
||||
|
||||
# extra termios flags
|
||||
CMSPAR = 0o10000000000 # Use "stick" (mark/space) parity
|
||||
|
||||
# baudrate ioctls
|
||||
TCGETS2 = 0x802C542A
|
||||
TCSETS2 = 0x402C542B
|
||||
BOTHER = 0o010000
|
||||
|
||||
# RS485 ioctls
|
||||
TIOCGRS485 = 0x542E
|
||||
TIOCSRS485 = 0x542F
|
||||
SER_RS485_ENABLED = 0b00000001
|
||||
SER_RS485_RTS_ON_SEND = 0b00000010
|
||||
SER_RS485_RTS_AFTER_SEND = 0b00000100
|
||||
SER_RS485_RX_DURING_TX = 0b00010000
|
||||
|
||||
class PlatformSpecific(PlatformSpecificBase):
|
||||
BAUDRATE_CONSTANTS = {
|
||||
0: 0o000000, # hang up
|
||||
50: 0o000001,
|
||||
75: 0o000002,
|
||||
110: 0o000003,
|
||||
134: 0o000004,
|
||||
150: 0o000005,
|
||||
200: 0o000006,
|
||||
300: 0o000007,
|
||||
600: 0o000010,
|
||||
1200: 0o000011,
|
||||
1800: 0o000012,
|
||||
2400: 0o000013,
|
||||
4800: 0o000014,
|
||||
9600: 0o000015,
|
||||
19200: 0o000016,
|
||||
38400: 0o000017,
|
||||
57600: 0o010001,
|
||||
115200: 0o010002,
|
||||
230400: 0o010003,
|
||||
460800: 0o010004,
|
||||
500000: 0o010005,
|
||||
576000: 0o010006,
|
||||
921600: 0o010007,
|
||||
1000000: 0o010010,
|
||||
1152000: 0o010011,
|
||||
1500000: 0o010012,
|
||||
2000000: 0o010013,
|
||||
2500000: 0o010014,
|
||||
3000000: 0o010015,
|
||||
3500000: 0o010016,
|
||||
4000000: 0o010017
|
||||
}
|
||||
|
||||
def _set_special_baudrate(self, baudrate):
|
||||
# right size is 44 on x86_64, allow for some growth
|
||||
buf = array.array('i', [0] * 64)
|
||||
try:
|
||||
# get serial_struct
|
||||
fcntl.ioctl(self.fd, TCGETS2, buf)
|
||||
# set custom speed
|
||||
buf[2] &= ~termios.CBAUD
|
||||
buf[2] |= BOTHER
|
||||
buf[9] = buf[10] = baudrate
|
||||
|
||||
# set serial_struct
|
||||
fcntl.ioctl(self.fd, TCSETS2, buf)
|
||||
except IOError as e:
|
||||
raise ValueError('Failed to set custom baud rate ({}): {}'.format(baudrate, e))
|
||||
|
||||
def _set_rs485_mode(self, rs485_settings):
|
||||
buf = array.array('i', [0] * 8) # flags, delaytx, delayrx, padding
|
||||
try:
|
||||
fcntl.ioctl(self.fd, TIOCGRS485, buf)
|
||||
buf[0] |= SER_RS485_ENABLED
|
||||
if rs485_settings is not None:
|
||||
if rs485_settings.loopback:
|
||||
buf[0] |= SER_RS485_RX_DURING_TX
|
||||
else:
|
||||
buf[0] &= ~SER_RS485_RX_DURING_TX
|
||||
if rs485_settings.rts_level_for_tx:
|
||||
buf[0] |= SER_RS485_RTS_ON_SEND
|
||||
else:
|
||||
buf[0] &= ~SER_RS485_RTS_ON_SEND
|
||||
if rs485_settings.rts_level_for_rx:
|
||||
buf[0] |= SER_RS485_RTS_AFTER_SEND
|
||||
else:
|
||||
buf[0] &= ~SER_RS485_RTS_AFTER_SEND
|
||||
if rs485_settings.delay_before_tx is not None:
|
||||
buf[1] = int(rs485_settings.delay_before_tx * 1000)
|
||||
if rs485_settings.delay_before_rx is not None:
|
||||
buf[2] = int(rs485_settings.delay_before_rx * 1000)
|
||||
else:
|
||||
buf[0] = 0 # clear SER_RS485_ENABLED
|
||||
fcntl.ioctl(self.fd, TIOCSRS485, buf)
|
||||
except IOError as e:
|
||||
raise ValueError('Failed to set RS485 mode: {}'.format(e))
|
||||
|
||||
|
||||
elif plat == 'cygwin': # cygwin/win32 (confirmed)
|
||||
|
||||
class PlatformSpecific(PlatformSpecificBase):
|
||||
BAUDRATE_CONSTANTS = {
|
||||
128000: 0x01003,
|
||||
256000: 0x01005,
|
||||
500000: 0x01007,
|
||||
576000: 0x01008,
|
||||
921600: 0x01009,
|
||||
1000000: 0x0100a,
|
||||
1152000: 0x0100b,
|
||||
1500000: 0x0100c,
|
||||
2000000: 0x0100d,
|
||||
2500000: 0x0100e,
|
||||
3000000: 0x0100f
|
||||
}
|
||||
|
||||
|
||||
elif plat[:6] == 'darwin': # OS X
|
||||
import array
|
||||
IOSSIOSPEED = 0x80045402 # _IOW('T', 2, speed_t)
|
||||
|
||||
class PlatformSpecific(PlatformSpecificBase):
|
||||
osx_version = os.uname()[2].split('.')
|
||||
# Tiger or above can support arbitrary serial speeds
|
||||
if int(osx_version[0]) >= 8:
|
||||
def _set_special_baudrate(self, baudrate):
|
||||
# use IOKit-specific call to set up high speeds
|
||||
buf = array.array('i', [baudrate])
|
||||
fcntl.ioctl(self.fd, IOSSIOSPEED, buf, 1)
|
||||
|
||||
elif plat[:3] == 'bsd' or \
|
||||
plat[:7] == 'freebsd' or \
|
||||
plat[:6] == 'netbsd' or \
|
||||
plat[:7] == 'openbsd':
|
||||
|
||||
class ReturnBaudrate(object):
|
||||
def __getitem__(self, key):
|
||||
return key
|
||||
|
||||
class PlatformSpecific(PlatformSpecificBase):
|
||||
# Only tested on FreeBSD:
|
||||
# The baud rate may be passed in as
|
||||
# a literal value.
|
||||
BAUDRATE_CONSTANTS = ReturnBaudrate()
|
||||
|
||||
else:
|
||||
class PlatformSpecific(PlatformSpecificBase):
|
||||
pass
|
||||
|
||||
|
||||
# load some constants for later use.
|
||||
# try to use values from termios, use defaults from linux otherwise
|
||||
TIOCMGET = getattr(termios, 'TIOCMGET', 0x5415)
|
||||
TIOCMBIS = getattr(termios, 'TIOCMBIS', 0x5416)
|
||||
TIOCMBIC = getattr(termios, 'TIOCMBIC', 0x5417)
|
||||
TIOCMSET = getattr(termios, 'TIOCMSET', 0x5418)
|
||||
|
||||
# TIOCM_LE = getattr(termios, 'TIOCM_LE', 0x001)
|
||||
TIOCM_DTR = getattr(termios, 'TIOCM_DTR', 0x002)
|
||||
TIOCM_RTS = getattr(termios, 'TIOCM_RTS', 0x004)
|
||||
# TIOCM_ST = getattr(termios, 'TIOCM_ST', 0x008)
|
||||
# TIOCM_SR = getattr(termios, 'TIOCM_SR', 0x010)
|
||||
|
||||
TIOCM_CTS = getattr(termios, 'TIOCM_CTS', 0x020)
|
||||
TIOCM_CAR = getattr(termios, 'TIOCM_CAR', 0x040)
|
||||
TIOCM_RNG = getattr(termios, 'TIOCM_RNG', 0x080)
|
||||
TIOCM_DSR = getattr(termios, 'TIOCM_DSR', 0x100)
|
||||
TIOCM_CD = getattr(termios, 'TIOCM_CD', TIOCM_CAR)
|
||||
TIOCM_RI = getattr(termios, 'TIOCM_RI', TIOCM_RNG)
|
||||
# TIOCM_OUT1 = getattr(termios, 'TIOCM_OUT1', 0x2000)
|
||||
# TIOCM_OUT2 = getattr(termios, 'TIOCM_OUT2', 0x4000)
|
||||
if hasattr(termios, 'TIOCINQ'):
|
||||
TIOCINQ = termios.TIOCINQ
|
||||
else:
|
||||
TIOCINQ = getattr(termios, 'FIONREAD', 0x541B)
|
||||
TIOCOUTQ = getattr(termios, 'TIOCOUTQ', 0x5411)
|
||||
|
||||
TIOCM_zero_str = struct.pack('I', 0)
|
||||
TIOCM_RTS_str = struct.pack('I', TIOCM_RTS)
|
||||
TIOCM_DTR_str = struct.pack('I', TIOCM_DTR)
|
||||
|
||||
TIOCSBRK = getattr(termios, 'TIOCSBRK', 0x5427)
|
||||
TIOCCBRK = getattr(termios, 'TIOCCBRK', 0x5428)
|
||||
|
||||
|
||||
class Serial(SerialBase, PlatformSpecific):
    """\
    Serial port class POSIX implementation. Serial port configuration is
    done with termios and fcntl. Runs on Linux and many other Un*x like
    systems.
    """

    def open(self):
        """\
        Open port with current settings. This may throw a SerialException
        if the port cannot be opened."""
        if self._port is None:
            raise SerialException("Port must be configured before it can be used.")
        if self.is_open:
            raise SerialException("Port is already open.")
        self.fd = None
        # open
        try:
            self.fd = os.open(self.portstr, os.O_RDWR | os.O_NOCTTY | os.O_NONBLOCK)
        except OSError as msg:
            self.fd = None
            raise SerialException(msg.errno, "could not open port {}: {}".format(self._port, msg))
        #~ fcntl.fcntl(self.fd, fcntl.F_SETFL, 0)  # set blocking

        try:
            self._reconfigure_port(force_update=True)
        except:
            try:
                os.close(self.fd)
            except:
                # ignore any exception when closing the port
                # also to keep original exception that happened when setting up
                pass
            self.fd = None
            raise
        else:
            self.is_open = True
        try:
            # only touch the modem lines when no handshaking uses them
            if not self._dsrdtr:
                self._update_dtr_state()
            if not self._rtscts:
                self._update_rts_state()
        except IOError as e:
            if e.errno in (errno.EINVAL, errno.ENOTTY):
                # ignore Invalid argument and Inappropriate ioctl
                pass
            else:
                raise
        self.reset_input_buffer()
        # these pipes allow cancel_read()/cancel_write() to wake up a
        # blocking select() in read()/write()
        self.pipe_abort_read_r, self.pipe_abort_read_w = os.pipe()
        self.pipe_abort_write_r, self.pipe_abort_write_w = os.pipe()
        fcntl.fcntl(self.pipe_abort_read_r, fcntl.F_SETFL, os.O_NONBLOCK)
        fcntl.fcntl(self.pipe_abort_write_r, fcntl.F_SETFL, os.O_NONBLOCK)

    def _reconfigure_port(self, force_update=False):
        """Set communication parameters on opened port."""
        if self.fd is None:
            raise SerialException("Can only operate on a valid file descriptor")

        # if exclusive lock is requested, create it before we modify anything else
        if self._exclusive is not None:
            if self._exclusive:
                try:
                    fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                except IOError as msg:
                    raise SerialException(msg.errno, "Could not exclusively lock port {}: {}".format(self._port, msg))
            else:
                fcntl.flock(self.fd, fcntl.LOCK_UN)

        custom_baud = None

        vmin = vtime = 0  # timeout is done via select
        if self._inter_byte_timeout is not None:
            vmin = 1
            vtime = int(self._inter_byte_timeout * 10)
        try:
            orig_attr = termios.tcgetattr(self.fd)
            iflag, oflag, cflag, lflag, ispeed, ospeed, cc = orig_attr
        except termios.error as msg:  # if a port is nonexistent but has a /dev file, it'll fail here
            raise SerialException("Could not configure port: {}".format(msg))
        # set up raw mode / no echo / binary
        cflag |= (termios.CLOCAL | termios.CREAD)
        lflag &= ~(termios.ICANON | termios.ECHO | termios.ECHOE |
                   termios.ECHOK | termios.ECHONL |
                   termios.ISIG | termios.IEXTEN)  # |termios.ECHOPRT
        for flag in ('ECHOCTL', 'ECHOKE'):  # netbsd workaround for Erk
            if hasattr(termios, flag):
                lflag &= ~getattr(termios, flag)

        oflag &= ~(termios.OPOST | termios.ONLCR | termios.OCRNL)
        iflag &= ~(termios.INLCR | termios.IGNCR | termios.ICRNL | termios.IGNBRK)
        if hasattr(termios, 'IUCLC'):
            iflag &= ~termios.IUCLC
        if hasattr(termios, 'PARMRK'):
            iflag &= ~termios.PARMRK

        # setup baud rate
        try:
            ispeed = ospeed = getattr(termios, 'B{}'.format(self._baudrate))
        except AttributeError:
            try:
                ispeed = ospeed = self.BAUDRATE_CONSTANTS[self._baudrate]
            except KeyError:
                #~ raise ValueError('Invalid baud rate: %r' % self._baudrate)
                # may need custom baud rate, it isn't in our list.
                ispeed = ospeed = getattr(termios, 'B38400')
                try:
                    custom_baud = int(self._baudrate)  # store for later
                except ValueError:
                    raise ValueError('Invalid baud rate: {!r}'.format(self._baudrate))
                else:
                    if custom_baud < 0:
                        raise ValueError('Invalid baud rate: {!r}'.format(self._baudrate))

        # setup char len
        cflag &= ~termios.CSIZE
        if self._bytesize == 8:
            cflag |= termios.CS8
        elif self._bytesize == 7:
            cflag |= termios.CS7
        elif self._bytesize == 6:
            cflag |= termios.CS6
        elif self._bytesize == 5:
            cflag |= termios.CS5
        else:
            raise ValueError('Invalid char len: {!r}'.format(self._bytesize))
        # setup stop bits
        if self._stopbits == serial.STOPBITS_ONE:
            cflag &= ~(termios.CSTOPB)
        elif self._stopbits == serial.STOPBITS_ONE_POINT_FIVE:
            cflag |= (termios.CSTOPB)  # XXX same as TWO.. there is no POSIX support for 1.5
        elif self._stopbits == serial.STOPBITS_TWO:
            cflag |= (termios.CSTOPB)
        else:
            raise ValueError('Invalid stop bit specification: {!r}'.format(self._stopbits))
        # setup parity
        iflag &= ~(termios.INPCK | termios.ISTRIP)
        if self._parity == serial.PARITY_NONE:
            cflag &= ~(termios.PARENB | termios.PARODD | CMSPAR)
        elif self._parity == serial.PARITY_EVEN:
            cflag &= ~(termios.PARODD | CMSPAR)
            cflag |= (termios.PARENB)
        elif self._parity == serial.PARITY_ODD:
            cflag &= ~CMSPAR
            cflag |= (termios.PARENB | termios.PARODD)
        elif self._parity == serial.PARITY_MARK and CMSPAR:
            cflag |= (termios.PARENB | CMSPAR | termios.PARODD)
        elif self._parity == serial.PARITY_SPACE and CMSPAR:
            cflag |= (termios.PARENB | CMSPAR)
            cflag &= ~(termios.PARODD)
        else:
            raise ValueError('Invalid parity: {!r}'.format(self._parity))
        # setup flow control
        # xonxoff
        if hasattr(termios, 'IXANY'):
            if self._xonxoff:
                iflag |= (termios.IXON | termios.IXOFF)  # |termios.IXANY)
            else:
                iflag &= ~(termios.IXON | termios.IXOFF | termios.IXANY)
        else:
            if self._xonxoff:
                iflag |= (termios.IXON | termios.IXOFF)
            else:
                iflag &= ~(termios.IXON | termios.IXOFF)
        # rtscts
        if hasattr(termios, 'CRTSCTS'):
            if self._rtscts:
                cflag |= (termios.CRTSCTS)
            else:
                cflag &= ~(termios.CRTSCTS)
        elif hasattr(termios, 'CNEW_RTSCTS'):  # try it with alternate constant name
            if self._rtscts:
                cflag |= (termios.CNEW_RTSCTS)
            else:
                cflag &= ~(termios.CNEW_RTSCTS)
        # XXX should there be a warning if setting up rtscts (and xonxoff etc) fails??

        # buffer
        # vmin "minimal number of characters to be read. 0 for non blocking"
        if vmin < 0 or vmin > 255:
            raise ValueError('Invalid vmin: {!r}'.format(vmin))
        cc[termios.VMIN] = vmin
        # vtime
        if vtime < 0 or vtime > 255:
            raise ValueError('Invalid vtime: {!r}'.format(vtime))
        cc[termios.VTIME] = vtime
        # activate settings
        # only call tcsetattr when something actually changed (or forced),
        # to avoid disturbing the line unnecessarily
        if force_update or [iflag, oflag, cflag, lflag, ispeed, ospeed, cc] != orig_attr:
            termios.tcsetattr(
                self.fd,
                termios.TCSANOW,
                [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])

        # apply custom baud rate, if any
        if custom_baud is not None:
            self._set_special_baudrate(custom_baud)

        if self._rs485_mode is not None:
            self._set_rs485_mode(self._rs485_mode)

    def close(self):
        """Close port"""
        if self.is_open:
            if self.fd is not None:
                os.close(self.fd)
                self.fd = None
                # also tear down the abort pipes created in open()
                os.close(self.pipe_abort_read_w)
                os.close(self.pipe_abort_read_r)
                os.close(self.pipe_abort_write_w)
                os.close(self.pipe_abort_write_r)
                self.pipe_abort_read_r, self.pipe_abort_read_w = None, None
                self.pipe_abort_write_r, self.pipe_abort_write_w = None, None
            self.is_open = False

    # - - - - - - - - - - - - - - - - - - - - - - - -

    @property
    def in_waiting(self):
        """Return the number of bytes currently in the input buffer."""
        #~ s = fcntl.ioctl(self.fd, termios.FIONREAD, TIOCM_zero_str)
        s = fcntl.ioctl(self.fd, TIOCINQ, TIOCM_zero_str)
        return struct.unpack('I', s)[0]

    # select based implementation, proved to work on many systems
    def read(self, size=1):
        """\
        Read size bytes from the serial port. If a timeout is set it may
        return less characters as requested. With no timeout it will block
        until the requested number of bytes is read.
        """
        if not self.is_open:
            raise portNotOpenError
        read = bytearray()
        timeout = Timeout(self._timeout)
        while len(read) < size:
            try:
                # also watch the abort pipe so cancel_read() can interrupt us
                ready, _, _ = select.select([self.fd, self.pipe_abort_read_r], [], [], timeout.time_left())
                if self.pipe_abort_read_r in ready:
                    os.read(self.pipe_abort_read_r, 1000)
                    break
                # If select was used with a timeout, and the timeout occurs, it
                # returns with empty lists -> thus abort read operation.
                # For timeout == 0 (non-blocking operation) also abort when
                # there is nothing to read.
                if not ready:
                    break  # timeout
                buf = os.read(self.fd, size - len(read))
                # read should always return some data as select reported it was
                # ready to read when we get to this point.
                if not buf:
                    # Disconnected devices, at least on Linux, show the
                    # behavior that they are always ready to read immediately
                    # but reading returns nothing.
                    raise SerialException(
                        'device reports readiness to read but returned no data '
                        '(device disconnected or multiple access on port?)')
                read.extend(buf)
            except OSError as e:
                # this is for Python 3.x where select.error is a subclass of
                # OSError ignore BlockingIOErrors and EINTR. other errors are shown
                # https://www.python.org/dev/peps/pep-0475.
                if e.errno not in (errno.EAGAIN, errno.EALREADY, errno.EWOULDBLOCK, errno.EINPROGRESS, errno.EINTR):
                    raise SerialException('read failed: {}'.format(e))
            except select.error as e:
                # this is for Python 2.x
                # ignore BlockingIOErrors and EINTR. all errors are shown
                # see also http://www.python.org/dev/peps/pep-3151/#select
                if e[0] not in (errno.EAGAIN, errno.EALREADY, errno.EWOULDBLOCK, errno.EINPROGRESS, errno.EINTR):
                    raise SerialException('read failed: {}'.format(e))
            if timeout.expired():
                break
        return bytes(read)

    def cancel_read(self):
        """Abort a blocking read() from another thread via the abort pipe."""
        if self.is_open:
            os.write(self.pipe_abort_read_w, b"x")

    def cancel_write(self):
        """Abort a blocking write() from another thread via the abort pipe."""
        if self.is_open:
            os.write(self.pipe_abort_write_w, b"x")

    def write(self, data):
        """Output the given byte string over the serial port."""
        if not self.is_open:
            raise portNotOpenError
        d = to_bytes(data)
        tx_len = length = len(d)
        timeout = Timeout(self._write_timeout)
        while tx_len > 0:
            try:
                n = os.write(self.fd, d)
                if timeout.is_non_blocking:
                    # Zero timeout indicates non-blocking - simply return the
                    # number of bytes of data actually written
                    return n
                elif not timeout.is_infinite:
                    # when timeout is set, use select to wait for being ready
                    # with the time left as timeout
                    if timeout.expired():
                        raise writeTimeoutError
                    abort, ready, _ = select.select([self.pipe_abort_write_r], [self.fd], [], timeout.time_left())
                    if abort:
                        os.read(self.pipe_abort_write_r, 1000)
                        break
                    if not ready:
                        raise writeTimeoutError
                else:
                    assert timeout.time_left() is None
                    # wait for write operation
                    abort, ready, _ = select.select([self.pipe_abort_write_r], [self.fd], [], None)
                    if abort:
                        os.read(self.pipe_abort_write_r, 1)
                        break
                    if not ready:
                        raise SerialException('write failed (select)')
                d = d[n:]
                tx_len -= n
            except SerialException:
                raise
            except OSError as e:
                # this is for Python 3.x where select.error is a subclass of
                # OSError ignore BlockingIOErrors and EINTR. other errors are shown
                # https://www.python.org/dev/peps/pep-0475.
                if e.errno not in (errno.EAGAIN, errno.EALREADY, errno.EWOULDBLOCK, errno.EINPROGRESS, errno.EINTR):
                    raise SerialException('write failed: {}'.format(e))
            except select.error as e:
                # this is for Python 2.x
                # ignore BlockingIOErrors and EINTR. all errors are shown
                # see also http://www.python.org/dev/peps/pep-3151/#select
                if e[0] not in (errno.EAGAIN, errno.EALREADY, errno.EWOULDBLOCK, errno.EINPROGRESS, errno.EINTR):
                    raise SerialException('write failed: {}'.format(e))
            if not timeout.is_non_blocking and timeout.expired():
                raise writeTimeoutError
        return length - len(d)

    def flush(self):
        """\
        Flush of file like objects. In this case, wait until all data
        is written.
        """
        if not self.is_open:
            raise portNotOpenError
        termios.tcdrain(self.fd)

    def reset_input_buffer(self):
        """Clear input buffer, discarding all that is in the buffer."""
        if not self.is_open:
            raise portNotOpenError
        termios.tcflush(self.fd, termios.TCIFLUSH)

    def reset_output_buffer(self):
        """\
        Clear output buffer, aborting the current output and discarding all
        that is in the buffer.
        """
        if not self.is_open:
            raise portNotOpenError
        termios.tcflush(self.fd, termios.TCOFLUSH)

    def send_break(self, duration=0.25):
        """\
        Send break condition. Timed, returns to idle state after given
        duration.
        """
        if not self.is_open:
            raise portNotOpenError
        # duration is expressed in multiples of the 0.25s base unit here
        termios.tcsendbreak(self.fd, int(duration / 0.25))

    def _update_break_state(self):
        """\
        Set break: Controls TXD. When active, no transmitting is possible.
        """
        if self._break_state:
            fcntl.ioctl(self.fd, TIOCSBRK)
        else:
            fcntl.ioctl(self.fd, TIOCCBRK)

    def _update_rts_state(self):
        """Set terminal status line: Request To Send"""
        if self._rts_state:
            fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_RTS_str)
        else:
            fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_RTS_str)

    def _update_dtr_state(self):
        """Set terminal status line: Data Terminal Ready"""
        if self._dtr_state:
            fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_DTR_str)
        else:
            fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_DTR_str)

    @property
    def cts(self):
        """Read terminal status line: Clear To Send"""
        if not self.is_open:
            raise portNotOpenError
        s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
        return struct.unpack('I', s)[0] & TIOCM_CTS != 0

    @property
    def dsr(self):
        """Read terminal status line: Data Set Ready"""
        if not self.is_open:
            raise portNotOpenError
        s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
        return struct.unpack('I', s)[0] & TIOCM_DSR != 0

    @property
    def ri(self):
        """Read terminal status line: Ring Indicator"""
        if not self.is_open:
            raise portNotOpenError
        s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
        return struct.unpack('I', s)[0] & TIOCM_RI != 0

    @property
    def cd(self):
        """Read terminal status line: Carrier Detect"""
        if not self.is_open:
            raise portNotOpenError
        s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
        return struct.unpack('I', s)[0] & TIOCM_CD != 0

    # - - platform specific - - - -

    @property
    def out_waiting(self):
        """Return the number of bytes currently in the output buffer."""
        #~ s = fcntl.ioctl(self.fd, termios.FIONREAD, TIOCM_zero_str)
        s = fcntl.ioctl(self.fd, TIOCOUTQ, TIOCM_zero_str)
        return struct.unpack('I', s)[0]

    def fileno(self):
        """\
        For easier use of the serial port instance with select.
        WARNING: this function is not portable to different platforms!
        """
        if not self.is_open:
            raise portNotOpenError
        return self.fd

    def set_input_flow_control(self, enable=True):
        """\
        Manually control flow - when software flow control is enabled.
        This will send XON (true) or XOFF (false) to the other device.
        WARNING: this function is not portable to different platforms!
        """
        if not self.is_open:
            raise portNotOpenError
        if enable:
            termios.tcflow(self.fd, termios.TCION)
        else:
            termios.tcflow(self.fd, termios.TCIOFF)

    def set_output_flow_control(self, enable=True):
        """\
        Manually control flow of outgoing data - when hardware or software flow
        control is enabled.
        WARNING: this function is not portable to different platforms!
        """
        if not self.is_open:
            raise portNotOpenError
        if enable:
            termios.tcflow(self.fd, termios.TCOON)
        else:
            termios.tcflow(self.fd, termios.TCOOFF)

    def nonblocking(self):
        """DEPRECATED - has no use"""
        import warnings
        warnings.warn("nonblocking() has no effect, already nonblocking", DeprecationWarning)
|
||||
|
||||
|
||||
class PosixPollSerial(Serial):
    """\
    Poll based read implementation. Not all systems support poll properly.
    However this one has better handling of errors, such as a device
    disconnecting while it's in use (e.g. USB-serial unplugged).
    """

    def read(self, size=1):
        """\
        Read size bytes from the serial port. If a timeout is set it may
        return less characters as requested. With no timeout it will block
        until the requested number of bytes is read.

        Raises SerialException when poll() reports a device error and
        portNotOpenError when called on a closed port.
        """
        if not self.is_open:
            raise portNotOpenError
        read = bytearray()
        poll = select.poll()
        poll.register(self.fd, select.POLLIN | select.POLLERR | select.POLLHUP | select.POLLNVAL)
        # poll() expects milliseconds, or None to block forever.  Multiplying
        # self._timeout unconditionally (as before) raised TypeError for the
        # default blocking mode where self._timeout is None.
        poll_timeout = None if self._timeout is None else self._timeout * 1000
        if size > 0:
            while len(read) < size:
                # wait until device becomes ready to read (or something fails)
                for fd, event in poll.poll(poll_timeout):
                    if event & (select.POLLERR | select.POLLHUP | select.POLLNVAL):
                        raise SerialException('device reports error (poll)')
                    # we don't care if it is select.POLLIN or timeout, that's
                    # handled below
                buf = os.read(self.fd, size - len(read))
                read.extend(buf)
                if ((self._timeout is not None and self._timeout >= 0) or
                        (self._inter_byte_timeout is not None and self._inter_byte_timeout > 0)) and not buf:
                    break  # early abort on timeout
        return bytes(read)
|
||||
|
||||
|
||||
class VTIMESerial(Serial):
    """\
    Implement timeout using vtime of tty device instead of using select.
    This means that no inter character timeout can be specified and that
    the error handling is degraded.

    Overall timeout is disabled when inter-character timeout is used.
    """

    def _reconfigure_port(self, force_update=True):
        """Set communication parameters on opened port."""
        super(VTIMESerial, self)._reconfigure_port()
        fcntl.fcntl(self.fd, fcntl.F_SETFL, 0)  # clear O_NONBLOCK

        # map the configured timeouts onto the tty's VMIN/VTIME mechanism;
        # VTIME is expressed in tenths of a second (hence the * 10)
        if self._inter_byte_timeout is not None:
            vmin = 1
            vtime = int(self._inter_byte_timeout * 10)
        elif self._timeout is None:
            # fully blocking: wait for at least one byte, no time limit
            vmin = 1
            vtime = 0
        else:
            # overall timeout: return whatever arrived within the window
            vmin = 0
            vtime = int(self._timeout * 10)
        try:
            orig_attr = termios.tcgetattr(self.fd)
            iflag, oflag, cflag, lflag, ispeed, ospeed, cc = orig_attr
        except termios.error as msg:  # if a port is nonexistent but has a /dev file, it'll fail here
            raise serial.SerialException("Could not configure port: {}".format(msg))

        if vtime < 0 or vtime > 255:
            raise ValueError('Invalid vtime: {!r}'.format(vtime))
        cc[termios.VTIME] = vtime
        cc[termios.VMIN] = vmin

        termios.tcsetattr(
            self.fd,
            termios.TCSANOW,
            [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])

    def read(self, size=1):
        """\
        Read size bytes from the serial port. If a timeout is set it may
        return less characters as requested. With no timeout it will block
        until the requested number of bytes is read.
        """
        if not self.is_open:
            raise portNotOpenError
        read = bytearray()
        while len(read) < size:
            # blocking read; the tty's VMIN/VTIME settings enforce the timeout
            buf = os.read(self.fd, size - len(read))
            if not buf:
                break
            read.extend(buf)
        return bytes(read)

    # hack to make hasattr return false
    cancel_read = property()
|
||||
693
software/tools/pymcuprog/libs/serial/serialutil.py
Normal file
693
software/tools/pymcuprog/libs/serial/serialutil.py
Normal file
@@ -0,0 +1,693 @@
|
||||
#! python
|
||||
#
|
||||
# Base class and support functions used by various backends.
|
||||
#
|
||||
# This file is part of pySerial. https://github.com/pyserial/pyserial
|
||||
# (C) 2001-2016 Chris Liechti <cliechti@gmx.net>
|
||||
#
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
import io
|
||||
import time
|
||||
|
||||
# ``memoryview`` was introduced in Python 2.7 and ``bytes(some_memoryview)``
|
||||
# isn't returning the contents (very unfortunate). Therefore we need special
|
||||
# cases and test for it. Ensure that there is a ``memoryview`` object for older
|
||||
# Python versions. This is easier than making every test dependent on its
|
||||
# existence.
|
||||
# Python 2 / Python 3 compatibility shims: probe for names that exist on
# only one major version and provide substitutes on the other so the rest
# of the module can use them unconditionally.
try:
    memoryview
except (NameError, AttributeError):
    # implementation does not matter as we do not really use it.
    # it just must not inherit from something else we might care for.
    class memoryview(object):  # pylint: disable=redefined-builtin,invalid-name
        pass

try:
    unicode
except (NameError, AttributeError):
    unicode = str  # for Python 3, pylint: disable=redefined-builtin,invalid-name

try:
    basestring
except (NameError, AttributeError):
    basestring = (str,)  # for Python 3, pylint: disable=redefined-builtin,invalid-name
|
||||
|
||||
|
||||
# "for byte in data" fails for python3 as it returns ints instead of bytes
|
||||
def iterbytes(b):
    """Yield the contents of *b* one byte at a time, each as a length-1
    bytes object (works around Python 3 iterating bytes as ints)."""
    if isinstance(b, memoryview):
        b = b.tobytes()
    pos = 0
    while True:
        chunk = b[pos:pos + 1]
        if not chunk:
            return
        pos += 1
        yield chunk
|
||||
|
||||
|
||||
# all Python versions prior 3.x convert ``str([17])`` to '[17]' instead of '\x11'
|
||||
# so a simple ``bytes(sequence)`` doesn't work for all versions
|
||||
def to_bytes(seq):
    """convert a sequence to a bytes type"""
    # already the right type: hand it back untouched
    if isinstance(seq, bytes):
        return seq
    if isinstance(seq, bytearray):
        return bytes(seq)
    if isinstance(seq, memoryview):
        return seq.tobytes()
    if isinstance(seq, unicode):
        raise TypeError('unicode strings are not supported, please encode to bytes: {!r}'.format(seq))
    # handle list of integers and bytes (one or more items) for Python 2 and 3
    return bytes(bytearray(seq))
|
||||
|
||||
|
||||
# create control bytes
XON = to_bytes([17])   # DC1: resume transmission (software flow control)
XOFF = to_bytes([19])  # DC3: pause transmission (software flow control)

CR = to_bytes([13])    # carriage return
LF = to_bytes([10])    # line feed


# parity, stop-bit and byte-size constants used throughout the public API
PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK, PARITY_SPACE = 'N', 'E', 'O', 'M', 'S'
STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO = (1, 1.5, 2)
FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS = (5, 6, 7, 8)

# human readable names for the PARITY_* constants
PARITY_NAMES = {
    PARITY_NONE: 'None',
    PARITY_EVEN: 'Even',
    PARITY_ODD: 'Odd',
    PARITY_MARK: 'Mark',
    PARITY_SPACE: 'Space',
}
|
||||
|
||||
|
||||
class SerialException(IOError):
    """Base class for serial port related exceptions."""


class SerialTimeoutException(SerialException):
    """Write timeouts give an exception"""


# pre-built exception instances raised by the backends; note they are
# instances (not classes), so callers catch the classes above
writeTimeoutError = SerialTimeoutException('Write timeout')
portNotOpenError = SerialException('Attempting to use a port that is not open')
|
||||
|
||||
|
||||
class Timeout(object):
    """\
    Helper that tracks a deadline for I/O operations, based on
    time.monotonic() where available and time.time() otherwise.

    A duration of 0 marks the timeout as non-blocking and None as
    infinite; the attributes is_non_blocking and is_infinite reflect
    which mode was requested.
    """
    # prefer a monotonic clock (Python 3.3+) so system clock adjustments
    # cannot distort the deadline; fall back to wall-clock time otherwise
    TIME = time.monotonic if hasattr(time, 'monotonic') else time.time

    def __init__(self, duration):
        """Remember *duration* and compute the target deadline."""
        self.is_infinite = duration is None
        self.is_non_blocking = duration == 0
        self.duration = duration
        self.target_time = None if duration is None else self.TIME() + duration

    def expired(self):
        """Return True once the deadline has passed (never for infinite)."""
        if self.target_time is None:
            return False
        return self.time_left() <= 0

    def time_left(self):
        """Seconds remaining: 0 when non-blocking, None when infinite."""
        if self.is_non_blocking:
            return 0
        if self.is_infinite:
            return None
        remaining = self.target_time - self.TIME()
        if remaining > self.duration:
            # the clock jumped backwards; re-arm so the caller still
            # waits at most *duration* seconds in total
            self.target_time = self.TIME() + self.duration
            return self.duration
        return max(0, remaining)

    def restart(self, duration):
        """\
        Re-arm the timeout with a new *duration*; only supported after a
        timeout was already set up before.
        """
        self.duration = duration
        self.target_time = self.TIME() + duration
|
||||
|
||||
|
||||
class SerialBase(io.RawIOBase):
|
||||
"""\
|
||||
Serial port base class. Provides __init__ function and properties to
|
||||
get/set port settings.
|
||||
"""
|
||||
|
||||
# default values, may be overridden in subclasses that do not support all values
|
||||
BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
|
||||
9600, 19200, 38400, 57600, 115200, 230400, 460800, 500000,
|
||||
576000, 921600, 1000000, 1152000, 1500000, 2000000, 2500000,
|
||||
3000000, 3500000, 4000000)
|
||||
BYTESIZES = (FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS)
|
||||
PARITIES = (PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK, PARITY_SPACE)
|
||||
STOPBITS = (STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO)
|
||||
|
||||
    def __init__(self,
                 port=None,
                 baudrate=9600,
                 bytesize=EIGHTBITS,
                 parity=PARITY_NONE,
                 stopbits=STOPBITS_ONE,
                 timeout=None,
                 xonxoff=False,
                 rtscts=False,
                 write_timeout=None,
                 dsrdtr=False,
                 inter_byte_timeout=None,
                 exclusive=None,
                 **kwargs):
        """\
        Initialize comm port object. If a "port" is given, then the port will be
        opened immediately. Otherwise a Serial port object in closed state
        is returned.

        Extra keyword arguments are only accepted for backward
        compatibility ('writeTimeout', 'interCharTimeout'); anything else
        raises ValueError.
        """

        self.is_open = False
        self.portstr = None
        self.name = None
        # correct values are assigned below through properties
        self._port = None
        self._baudrate = None
        self._bytesize = None
        self._parity = None
        self._stopbits = None
        self._timeout = None
        self._write_timeout = None
        self._xonxoff = None
        self._rtscts = None
        self._dsrdtr = None
        self._inter_byte_timeout = None
        self._rs485_mode = None  # disabled by default
        self._rts_state = True
        self._dtr_state = True
        self._break_state = False
        self._exclusive = None

        # assign values using get/set methods using the properties feature
        # (each setter validates its value and raises ValueError on bad input)
        self.port = port
        self.baudrate = baudrate
        self.bytesize = bytesize
        self.parity = parity
        self.stopbits = stopbits
        self.timeout = timeout
        self.write_timeout = write_timeout
        self.xonxoff = xonxoff
        self.rtscts = rtscts
        self.dsrdtr = dsrdtr
        self.inter_byte_timeout = inter_byte_timeout
        self.exclusive = exclusive

        # watch for backward compatible kwargs
        if 'writeTimeout' in kwargs:
            self.write_timeout = kwargs.pop('writeTimeout')
        if 'interCharTimeout' in kwargs:
            self.inter_byte_timeout = kwargs.pop('interCharTimeout')
        if kwargs:
            raise ValueError('unexpected keyword arguments: {!r}'.format(kwargs))

        if port is not None:
            self.open()
|
||||
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
|
||||
# to be implemented by subclasses:
|
||||
# def open(self):
|
||||
# def close(self):
|
||||
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
|
||||
    @property
    def port(self):
        """\
        Get the current port setting. The value that was passed on init or using
        setPort() is passed back.
        """
        return self._port

    @port.setter
    def port(self, port):
        """\
        Change the port. An open port is closed first, then reopened on the
        new device name, so callers keep a usable object across the change.
        """
        if port is not None and not isinstance(port, basestring):
            raise ValueError('"port" must be None or a string, not {}'.format(type(port)))
        was_open = self.is_open
        if was_open:
            self.close()
        self.portstr = port
        self._port = port
        self.name = self.portstr
        if was_open:
            self.open()
|
||||
|
||||
@property
|
||||
def baudrate(self):
|
||||
"""Get the current baud rate setting."""
|
||||
return self._baudrate
|
||||
|
||||
@baudrate.setter
|
||||
def baudrate(self, baudrate):
|
||||
"""\
|
||||
Change baud rate. It raises a ValueError if the port is open and the
|
||||
baud rate is not possible. If the port is closed, then the value is
|
||||
accepted and the exception is raised when the port is opened.
|
||||
"""
|
||||
try:
|
||||
b = int(baudrate)
|
||||
except TypeError:
|
||||
raise ValueError("Not a valid baudrate: {!r}".format(baudrate))
|
||||
else:
|
||||
if b < 0:
|
||||
raise ValueError("Not a valid baudrate: {!r}".format(baudrate))
|
||||
self._baudrate = b
|
||||
if self.is_open:
|
||||
self._reconfigure_port()
|
||||
|
||||
@property
|
||||
def bytesize(self):
|
||||
"""Get the current byte size setting."""
|
||||
return self._bytesize
|
||||
|
||||
@bytesize.setter
|
||||
def bytesize(self, bytesize):
|
||||
"""Change byte size."""
|
||||
if bytesize not in self.BYTESIZES:
|
||||
raise ValueError("Not a valid byte size: {!r}".format(bytesize))
|
||||
self._bytesize = bytesize
|
||||
if self.is_open:
|
||||
self._reconfigure_port()
|
||||
|
||||
@property
|
||||
def exclusive(self):
|
||||
"""Get the current exclusive access setting."""
|
||||
return self._exclusive
|
||||
|
||||
@exclusive.setter
|
||||
def exclusive(self, exclusive):
|
||||
"""Change the exclusive access setting."""
|
||||
self._exclusive = exclusive
|
||||
if self.is_open:
|
||||
self._reconfigure_port()
|
||||
|
||||
@property
|
||||
def parity(self):
|
||||
"""Get the current parity setting."""
|
||||
return self._parity
|
||||
|
||||
@parity.setter
|
||||
def parity(self, parity):
|
||||
"""Change parity setting."""
|
||||
if parity not in self.PARITIES:
|
||||
raise ValueError("Not a valid parity: {!r}".format(parity))
|
||||
self._parity = parity
|
||||
if self.is_open:
|
||||
self._reconfigure_port()
|
||||
|
||||
@property
|
||||
def stopbits(self):
|
||||
"""Get the current stop bits setting."""
|
||||
return self._stopbits
|
||||
|
||||
@stopbits.setter
|
||||
def stopbits(self, stopbits):
|
||||
"""Change stop bits size."""
|
||||
if stopbits not in self.STOPBITS:
|
||||
raise ValueError("Not a valid stop bit size: {!r}".format(stopbits))
|
||||
self._stopbits = stopbits
|
||||
if self.is_open:
|
||||
self._reconfigure_port()
|
||||
|
||||
@property
|
||||
def timeout(self):
|
||||
"""Get the current timeout setting."""
|
||||
return self._timeout
|
||||
|
||||
@timeout.setter
|
||||
def timeout(self, timeout):
|
||||
"""Change timeout setting."""
|
||||
if timeout is not None:
|
||||
try:
|
||||
timeout + 1 # test if it's a number, will throw a TypeError if not...
|
||||
except TypeError:
|
||||
raise ValueError("Not a valid timeout: {!r}".format(timeout))
|
||||
if timeout < 0:
|
||||
raise ValueError("Not a valid timeout: {!r}".format(timeout))
|
||||
self._timeout = timeout
|
||||
if self.is_open:
|
||||
self._reconfigure_port()
|
||||
|
||||
@property
|
||||
def write_timeout(self):
|
||||
"""Get the current timeout setting."""
|
||||
return self._write_timeout
|
||||
|
||||
@write_timeout.setter
|
||||
def write_timeout(self, timeout):
|
||||
"""Change timeout setting."""
|
||||
if timeout is not None:
|
||||
if timeout < 0:
|
||||
raise ValueError("Not a valid timeout: {!r}".format(timeout))
|
||||
try:
|
||||
timeout + 1 # test if it's a number, will throw a TypeError if not...
|
||||
except TypeError:
|
||||
raise ValueError("Not a valid timeout: {!r}".format(timeout))
|
||||
|
||||
self._write_timeout = timeout
|
||||
if self.is_open:
|
||||
self._reconfigure_port()
|
||||
|
||||
@property
|
||||
def inter_byte_timeout(self):
|
||||
"""Get the current inter-character timeout setting."""
|
||||
return self._inter_byte_timeout
|
||||
|
||||
@inter_byte_timeout.setter
|
||||
def inter_byte_timeout(self, ic_timeout):
|
||||
"""Change inter-byte timeout setting."""
|
||||
if ic_timeout is not None:
|
||||
if ic_timeout < 0:
|
||||
raise ValueError("Not a valid timeout: {!r}".format(ic_timeout))
|
||||
try:
|
||||
ic_timeout + 1 # test if it's a number, will throw a TypeError if not...
|
||||
except TypeError:
|
||||
raise ValueError("Not a valid timeout: {!r}".format(ic_timeout))
|
||||
|
||||
self._inter_byte_timeout = ic_timeout
|
||||
if self.is_open:
|
||||
self._reconfigure_port()
|
||||
|
||||
@property
|
||||
def xonxoff(self):
|
||||
"""Get the current XON/XOFF setting."""
|
||||
return self._xonxoff
|
||||
|
||||
@xonxoff.setter
|
||||
def xonxoff(self, xonxoff):
|
||||
"""Change XON/XOFF setting."""
|
||||
self._xonxoff = xonxoff
|
||||
if self.is_open:
|
||||
self._reconfigure_port()
|
||||
|
||||
@property
|
||||
def rtscts(self):
|
||||
"""Get the current RTS/CTS flow control setting."""
|
||||
return self._rtscts
|
||||
|
||||
@rtscts.setter
|
||||
def rtscts(self, rtscts):
|
||||
"""Change RTS/CTS flow control setting."""
|
||||
self._rtscts = rtscts
|
||||
if self.is_open:
|
||||
self._reconfigure_port()
|
||||
|
||||
@property
|
||||
def dsrdtr(self):
|
||||
"""Get the current DSR/DTR flow control setting."""
|
||||
return self._dsrdtr
|
||||
|
||||
@dsrdtr.setter
|
||||
def dsrdtr(self, dsrdtr=None):
|
||||
"""Change DsrDtr flow control setting."""
|
||||
if dsrdtr is None:
|
||||
# if not set, keep backwards compatibility and follow rtscts setting
|
||||
self._dsrdtr = self._rtscts
|
||||
else:
|
||||
# if defined independently, follow its value
|
||||
self._dsrdtr = dsrdtr
|
||||
if self.is_open:
|
||||
self._reconfigure_port()
|
||||
|
||||
@property
|
||||
def rts(self):
|
||||
return self._rts_state
|
||||
|
||||
@rts.setter
|
||||
def rts(self, value):
|
||||
self._rts_state = value
|
||||
if self.is_open:
|
||||
self._update_rts_state()
|
||||
|
||||
@property
|
||||
def dtr(self):
|
||||
return self._dtr_state
|
||||
|
||||
@dtr.setter
|
||||
def dtr(self, value):
|
||||
self._dtr_state = value
|
||||
if self.is_open:
|
||||
self._update_dtr_state()
|
||||
|
||||
@property
|
||||
def break_condition(self):
|
||||
return self._break_state
|
||||
|
||||
@break_condition.setter
|
||||
def break_condition(self, value):
|
||||
self._break_state = value
|
||||
if self.is_open:
|
||||
self._update_break_state()
|
||||
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
# functions useful for RS-485 adapters
|
||||
|
||||
@property
|
||||
def rs485_mode(self):
|
||||
"""\
|
||||
Enable RS485 mode and apply new settings, set to None to disable.
|
||||
See serial.rs485.RS485Settings for more info about the value.
|
||||
"""
|
||||
return self._rs485_mode
|
||||
|
||||
@rs485_mode.setter
|
||||
def rs485_mode(self, rs485_settings):
|
||||
self._rs485_mode = rs485_settings
|
||||
if self.is_open:
|
||||
self._reconfigure_port()
|
||||
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
|
||||
_SAVED_SETTINGS = ('baudrate', 'bytesize', 'parity', 'stopbits', 'xonxoff',
|
||||
'dsrdtr', 'rtscts', 'timeout', 'write_timeout',
|
||||
'inter_byte_timeout')
|
||||
|
||||
def get_settings(self):
|
||||
"""\
|
||||
Get current port settings as a dictionary. For use with
|
||||
apply_settings().
|
||||
"""
|
||||
return dict([(key, getattr(self, '_' + key)) for key in self._SAVED_SETTINGS])
|
||||
|
||||
def apply_settings(self, d):
|
||||
"""\
|
||||
Apply stored settings from a dictionary returned from
|
||||
get_settings(). It's allowed to delete keys from the dictionary. These
|
||||
values will simply left unchanged.
|
||||
"""
|
||||
for key in self._SAVED_SETTINGS:
|
||||
if key in d and d[key] != getattr(self, '_' + key): # check against internal "_" value
|
||||
setattr(self, key, d[key]) # set non "_" value to use properties write function
|
||||
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
|
||||
def __repr__(self):
|
||||
"""String representation of the current port settings and its state."""
|
||||
return '{name}<id=0x{id:x}, open={p.is_open}>(port={p.portstr!r}, ' \
|
||||
'baudrate={p.baudrate!r}, bytesize={p.bytesize!r}, parity={p.parity!r}, ' \
|
||||
'stopbits={p.stopbits!r}, timeout={p.timeout!r}, xonxoff={p.xonxoff!r}, ' \
|
||||
'rtscts={p.rtscts!r}, dsrdtr={p.dsrdtr!r})'.format(
|
||||
name=self.__class__.__name__, id=id(self), p=self)
|
||||
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
# compatibility with io library
|
||||
# pylint: disable=invalid-name,missing-docstring
|
||||
|
||||
def readable(self):
|
||||
return True
|
||||
|
||||
def writable(self):
|
||||
return True
|
||||
|
||||
def seekable(self):
|
||||
return False
|
||||
|
||||
def readinto(self, b):
|
||||
data = self.read(len(b))
|
||||
n = len(data)
|
||||
try:
|
||||
b[:n] = data
|
||||
except TypeError as err:
|
||||
import array
|
||||
if not isinstance(b, array.array):
|
||||
raise err
|
||||
b[:n] = array.array('b', data)
|
||||
return n
|
||||
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
# context manager
|
||||
|
||||
def __enter__(self):
|
||||
if not self.is_open:
|
||||
self.open()
|
||||
return self
|
||||
|
||||
def __exit__(self, *args, **kwargs):
|
||||
self.close()
|
||||
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
|
||||
def send_break(self, duration=0.25):
|
||||
"""\
|
||||
Send break condition. Timed, returns to idle state after given
|
||||
duration.
|
||||
"""
|
||||
if not self.is_open:
|
||||
raise portNotOpenError
|
||||
self.break_condition = True
|
||||
time.sleep(duration)
|
||||
self.break_condition = False
|
||||
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
# backwards compatibility / deprecated functions
|
||||
|
||||
    # Deprecated camelCase API kept for backwards compatibility with
    # pySerial 2.x callers. Each member simply delegates to the modern
    # snake_case property/method above; prefer those in new code.

    def flushInput(self):
        # deprecated: use reset_input_buffer()
        self.reset_input_buffer()

    def flushOutput(self):
        # deprecated: use reset_output_buffer()
        self.reset_output_buffer()

    def inWaiting(self):
        # deprecated: use in_waiting
        return self.in_waiting

    def sendBreak(self, duration=0.25):
        # deprecated: use send_break()
        self.send_break(duration)

    def setRTS(self, value=1):
        # deprecated: use the rts property
        self.rts = value

    def setDTR(self, value=1):
        # deprecated: use the dtr property
        self.dtr = value

    def getCTS(self):
        # deprecated: use the cts property
        return self.cts

    def getDSR(self):
        # deprecated: use the dsr property
        return self.dsr

    def getRI(self):
        # deprecated: use the ri property
        return self.ri

    def getCD(self):
        # deprecated: use the cd property
        return self.cd

    def setPort(self, port):
        # deprecated: use the port property
        self.port = port

    @property
    def writeTimeout(self):
        # deprecated: use write_timeout
        return self.write_timeout

    @writeTimeout.setter
    def writeTimeout(self, timeout):
        self.write_timeout = timeout

    @property
    def interCharTimeout(self):
        # deprecated: use inter_byte_timeout
        return self.inter_byte_timeout

    @interCharTimeout.setter
    def interCharTimeout(self, interCharTimeout):
        self.inter_byte_timeout = interCharTimeout

    def getSettingsDict(self):
        # deprecated: use get_settings()
        return self.get_settings()

    def applySettingsDict(self, d):
        # deprecated: use apply_settings()
        self.apply_settings(d)

    def isOpen(self):
        # deprecated: use the is_open attribute
        return self.is_open
|
||||
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
# additional functionality
|
||||
|
||||
    def read_all(self):
        """\
        Read all bytes currently available in the buffer of the OS.
        """
        # in_waiting queries the driver; a single read() drains exactly that
        # many bytes without blocking for more.
        return self.read(self.in_waiting)
|
||||
|
||||
def read_until(self, terminator=LF, size=None):
|
||||
"""\
|
||||
Read until a termination sequence is found ('\n' by default), the size
|
||||
is exceeded or until timeout occurs.
|
||||
"""
|
||||
lenterm = len(terminator)
|
||||
line = bytearray()
|
||||
timeout = Timeout(self._timeout)
|
||||
while True:
|
||||
c = self.read(1)
|
||||
if c:
|
||||
line += c
|
||||
if line[-lenterm:] == terminator:
|
||||
break
|
||||
if size is not None and len(line) >= size:
|
||||
break
|
||||
else:
|
||||
break
|
||||
if timeout.expired():
|
||||
break
|
||||
return bytes(line)
|
||||
|
||||
def iread_until(self, *args, **kwargs):
|
||||
"""\
|
||||
Read lines, implemented as generator. It will raise StopIteration on
|
||||
timeout (empty read).
|
||||
"""
|
||||
while True:
|
||||
line = self.read_until(*args, **kwargs)
|
||||
if not line:
|
||||
break
|
||||
yield line
|
||||
|
||||
|
||||
# - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
if __name__ == '__main__':
    # Small self-demo: print the capabilities advertised by SerialBase.
    import sys
    s = SerialBase()
    out = sys.stdout
    out.write('port name:  {}\n'.format(s.name))
    out.write('baud rates: {}\n'.format(s.BAUDRATES))
    out.write('byte sizes: {}\n'.format(s.BYTESIZES))
    out.write('parities:   {}\n'.format(s.PARITIES))
    out.write('stop bits:  {}\n'.format(s.STOPBITS))
    out.write('{}\n'.format(s))
|
||||
475
software/tools/pymcuprog/libs/serial/serialwin32.py
Normal file
475
software/tools/pymcuprog/libs/serial/serialwin32.py
Normal file
@@ -0,0 +1,475 @@
|
||||
#! python
|
||||
#
|
||||
# backend for Windows ("win32" incl. 32/64 bit support)
|
||||
#
|
||||
# (C) 2001-2015 Chris Liechti <cliechti@gmx.net>
|
||||
#
|
||||
# This file is part of pySerial. https://github.com/pyserial/pyserial
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
# Initial patch to use ctypes by Giovanni Bajo <rasky@develer.com>
|
||||
|
||||
# pylint: disable=invalid-name,too-few-public-methods
|
||||
import ctypes
|
||||
import time
|
||||
from serial import win32
|
||||
|
||||
import serial
|
||||
from serial.serialutil import SerialBase, SerialException, to_bytes, portNotOpenError, writeTimeoutError
|
||||
|
||||
|
||||
class Serial(SerialBase):
    """Serial port implementation for Win32 based on ctypes.

    Uses overlapped (asynchronous) I/O with per-direction OVERLAPPED
    structures so that reads and writes can be cancelled from other threads.
    """

    BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
                 9600, 19200, 38400, 57600, 115200)

    def __init__(self, *args, **kwargs):
        # Created in open(); pre-set to None so close()/__del__ are safe on a
        # never-opened instance.
        self._port_handle = None
        self._overlapped_read = None
        self._overlapped_write = None
        super(Serial, self).__init__(*args, **kwargs)

    def open(self):
        """\
        Open port with current settings. This may throw a SerialException
        if the port cannot be opened.
        """
        if self._port is None:
            raise SerialException("Port must be configured before it can be used.")
        if self.is_open:
            raise SerialException("Port is already open.")
        # the "\\.\COMx" format is required for devices other than COM1-COM8
        # not all versions of windows seem to support this properly
        # so that the first few ports are used with the DOS device name
        port = self.name
        try:
            if port.upper().startswith('COM') and int(port[3:]) > 8:
                port = '\\\\.\\' + port
        except ValueError:
            # for like COMnotanumber
            pass
        self._port_handle = win32.CreateFile(
            port,
            win32.GENERIC_READ | win32.GENERIC_WRITE,
            0,  # exclusive access
            None,  # no security
            win32.OPEN_EXISTING,
            win32.FILE_ATTRIBUTE_NORMAL | win32.FILE_FLAG_OVERLAPPED,
            0)
        if self._port_handle == win32.INVALID_HANDLE_VALUE:
            self._port_handle = None    # 'cause __del__ is called anyway
            raise SerialException("could not open port {!r}: {!r}".format(self.portstr, ctypes.WinError()))

        try:
            # manual-reset event for reads, auto-reset event for writes
            self._overlapped_read = win32.OVERLAPPED()
            self._overlapped_read.hEvent = win32.CreateEvent(None, 1, 0, None)
            self._overlapped_write = win32.OVERLAPPED()
            #~ self._overlapped_write.hEvent = win32.CreateEvent(None, 1, 0, None)
            self._overlapped_write.hEvent = win32.CreateEvent(None, 0, 0, None)

            # Setup a 4k buffer
            win32.SetupComm(self._port_handle, 4096, 4096)

            # Save original timeout values:
            self._orgTimeouts = win32.COMMTIMEOUTS()
            win32.GetCommTimeouts(self._port_handle, ctypes.byref(self._orgTimeouts))

            self._reconfigure_port()

            # Clear buffers:
            # Remove anything that was there
            win32.PurgeComm(
                self._port_handle,
                win32.PURGE_TXCLEAR | win32.PURGE_TXABORT |
                win32.PURGE_RXCLEAR | win32.PURGE_RXABORT)
        except:
            try:
                self._close()
            except:
                # ignore any exception when closing the port
                # also to keep original exception that happened when setting up
                pass
            self._port_handle = None
            raise
        else:
            self.is_open = True

    def _reconfigure_port(self):
        """Set communication parameters on opened port."""
        if not self._port_handle:
            raise SerialException("Can only operate on a valid port handle")

        # Set Windows timeout values
        # timeouts is a tuple with the following items:
        # (ReadIntervalTimeout,ReadTotalTimeoutMultiplier,
        #  ReadTotalTimeoutConstant,WriteTotalTimeoutMultiplier,
        #  WriteTotalTimeoutConstant)
        timeouts = win32.COMMTIMEOUTS()
        if self._timeout is None:
            pass  # default of all zeros is OK
        elif self._timeout == 0:
            # non-blocking read: MAXDWORD interval means "return immediately"
            timeouts.ReadIntervalTimeout = win32.MAXDWORD
        else:
            # seconds -> milliseconds, at least 1 ms
            timeouts.ReadTotalTimeoutConstant = max(int(self._timeout * 1000), 1)
        if self._timeout != 0 and self._inter_byte_timeout is not None:
            timeouts.ReadIntervalTimeout = max(int(self._inter_byte_timeout * 1000), 1)

        if self._write_timeout is None:
            pass
        elif self._write_timeout == 0:
            timeouts.WriteTotalTimeoutConstant = win32.MAXDWORD
        else:
            timeouts.WriteTotalTimeoutConstant = max(int(self._write_timeout * 1000), 1)
        win32.SetCommTimeouts(self._port_handle, ctypes.byref(timeouts))

        win32.SetCommMask(self._port_handle, win32.EV_ERR)

        # Setup the connection info.
        # Get state and modify it:
        comDCB = win32.DCB()
        win32.GetCommState(self._port_handle, ctypes.byref(comDCB))
        comDCB.BaudRate = self._baudrate

        if self._bytesize == serial.FIVEBITS:
            comDCB.ByteSize = 5
        elif self._bytesize == serial.SIXBITS:
            comDCB.ByteSize = 6
        elif self._bytesize == serial.SEVENBITS:
            comDCB.ByteSize = 7
        elif self._bytesize == serial.EIGHTBITS:
            comDCB.ByteSize = 8
        else:
            raise ValueError("Unsupported number of data bits: {!r}".format(self._bytesize))

        if self._parity == serial.PARITY_NONE:
            comDCB.Parity = win32.NOPARITY
            comDCB.fParity = 0  # Disable Parity Check
        elif self._parity == serial.PARITY_EVEN:
            comDCB.Parity = win32.EVENPARITY
            comDCB.fParity = 1  # Enable Parity Check
        elif self._parity == serial.PARITY_ODD:
            comDCB.Parity = win32.ODDPARITY
            comDCB.fParity = 1  # Enable Parity Check
        elif self._parity == serial.PARITY_MARK:
            comDCB.Parity = win32.MARKPARITY
            comDCB.fParity = 1  # Enable Parity Check
        elif self._parity == serial.PARITY_SPACE:
            comDCB.Parity = win32.SPACEPARITY
            comDCB.fParity = 1  # Enable Parity Check
        else:
            raise ValueError("Unsupported parity mode: {!r}".format(self._parity))

        if self._stopbits == serial.STOPBITS_ONE:
            comDCB.StopBits = win32.ONESTOPBIT
        elif self._stopbits == serial.STOPBITS_ONE_POINT_FIVE:
            comDCB.StopBits = win32.ONE5STOPBITS
        elif self._stopbits == serial.STOPBITS_TWO:
            comDCB.StopBits = win32.TWOSTOPBITS
        else:
            raise ValueError("Unsupported number of stop bits: {!r}".format(self._stopbits))

        comDCB.fBinary = 1  # Enable Binary Transmission
        # Char. w/ Parity-Err are replaced with 0xff (if fErrorChar is set to TRUE)
        if self._rs485_mode is None:
            if self._rtscts:
                comDCB.fRtsControl = win32.RTS_CONTROL_HANDSHAKE
            else:
                comDCB.fRtsControl = win32.RTS_CONTROL_ENABLE if self._rts_state else win32.RTS_CONTROL_DISABLE
            comDCB.fOutxCtsFlow = self._rtscts
        else:
            # RS485: the driver toggles RTS around transmissions; most of the
            # RS485Settings knobs have no Win32 equivalent, so reject them.
            # checks for unsupported settings
            # XXX verify if platform really does not have a setting for those
            if not self._rs485_mode.rts_level_for_tx:
                raise ValueError(
                    'Unsupported value for RS485Settings.rts_level_for_tx: {!r}'.format(
                        self._rs485_mode.rts_level_for_tx,))
            if self._rs485_mode.rts_level_for_rx:
                raise ValueError(
                    'Unsupported value for RS485Settings.rts_level_for_rx: {!r}'.format(
                        self._rs485_mode.rts_level_for_rx,))
            if self._rs485_mode.delay_before_tx is not None:
                raise ValueError(
                    'Unsupported value for RS485Settings.delay_before_tx: {!r}'.format(
                        self._rs485_mode.delay_before_tx,))
            if self._rs485_mode.delay_before_rx is not None:
                raise ValueError(
                    'Unsupported value for RS485Settings.delay_before_rx: {!r}'.format(
                        self._rs485_mode.delay_before_rx,))
            if self._rs485_mode.loopback:
                raise ValueError(
                    'Unsupported value for RS485Settings.loopback: {!r}'.format(
                        self._rs485_mode.loopback,))
            comDCB.fRtsControl = win32.RTS_CONTROL_TOGGLE
            comDCB.fOutxCtsFlow = 0

        if self._dsrdtr:
            comDCB.fDtrControl = win32.DTR_CONTROL_HANDSHAKE
        else:
            comDCB.fDtrControl = win32.DTR_CONTROL_ENABLE if self._dtr_state else win32.DTR_CONTROL_DISABLE
        comDCB.fOutxDsrFlow = self._dsrdtr
        comDCB.fOutX = self._xonxoff
        comDCB.fInX = self._xonxoff
        comDCB.fNull = 0
        comDCB.fErrorChar = 0
        comDCB.fAbortOnError = 0
        comDCB.XonChar = serial.XON
        comDCB.XoffChar = serial.XOFF

        if not win32.SetCommState(self._port_handle, ctypes.byref(comDCB)):
            raise SerialException(
                'Cannot configure port, something went wrong. '
                'Original message: {!r}'.format(ctypes.WinError()))

    #~ def __del__(self):
    #~     self.close()

    def _close(self):
        """internal close port helper"""
        if self._port_handle is not None:
            # Restore original timeout values:
            win32.SetCommTimeouts(self._port_handle, self._orgTimeouts)
            # cancel pending overlapped I/O before closing the event handles
            if self._overlapped_read is not None:
                self.cancel_read()
                win32.CloseHandle(self._overlapped_read.hEvent)
                self._overlapped_read = None
            if self._overlapped_write is not None:
                self.cancel_write()
                win32.CloseHandle(self._overlapped_write.hEvent)
                self._overlapped_write = None
            win32.CloseHandle(self._port_handle)
            self._port_handle = None

    def close(self):
        """Close port"""
        if self.is_open:
            self._close()
            self.is_open = False

    # - - - - - - - - - - - - - - - - - - - - - - - -

    @property
    def in_waiting(self):
        """Return the number of bytes currently in the input buffer."""
        flags = win32.DWORD()
        comstat = win32.COMSTAT()
        if not win32.ClearCommError(self._port_handle, ctypes.byref(flags), ctypes.byref(comstat)):
            raise SerialException("ClearCommError failed ({!r})".format(ctypes.WinError()))
        return comstat.cbInQue

    def read(self, size=1):
        """\
        Read size bytes from the serial port. If a timeout is set it may
        return less characters as requested. With no timeout it will block
        until the requested number of bytes is read.
        """
        if not self.is_open:
            raise portNotOpenError
        if size > 0:
            win32.ResetEvent(self._overlapped_read.hEvent)
            flags = win32.DWORD()
            comstat = win32.COMSTAT()
            if not win32.ClearCommError(self._port_handle, ctypes.byref(flags), ctypes.byref(comstat)):
                raise SerialException("ClearCommError failed ({!r})".format(ctypes.WinError()))
            # non-blocking mode (timeout == 0): only take what is already
            # queued; otherwise request the full size and let the driver
            # timeouts apply
            n = min(comstat.cbInQue, size) if self.timeout == 0 else size
            if n > 0:
                buf = ctypes.create_string_buffer(n)
                rc = win32.DWORD()
                read_ok = win32.ReadFile(
                    self._port_handle,
                    buf,
                    n,
                    ctypes.byref(rc),
                    ctypes.byref(self._overlapped_read))
                if not read_ok and win32.GetLastError() not in (win32.ERROR_SUCCESS, win32.ERROR_IO_PENDING):
                    raise SerialException("ReadFile failed ({!r})".format(ctypes.WinError()))
                # wait (bWait=True) for the overlapped read to finish
                result_ok = win32.GetOverlappedResult(
                    self._port_handle,
                    ctypes.byref(self._overlapped_read),
                    ctypes.byref(rc),
                    True)
                if not result_ok:
                    if win32.GetLastError() != win32.ERROR_OPERATION_ABORTED:
                        raise SerialException("GetOverlappedResult failed ({!r})".format(ctypes.WinError()))
                read = buf.raw[:rc.value]
            else:
                read = bytes()
        else:
            read = bytes()
        return bytes(read)

    def write(self, data):
        """Output the given byte string over the serial port."""
        if not self.is_open:
            raise portNotOpenError
        #~ if not isinstance(data, (bytes, bytearray)):
        #~     raise TypeError('expected %s or bytearray, got %s' % (bytes, type(data)))
        # convert data (needed in case of memoryview instance: Py 3.1 io lib), ctypes doesn't like memoryview
        data = to_bytes(data)
        if data:
            #~ win32event.ResetEvent(self._overlapped_write.hEvent)
            n = win32.DWORD()
            success = win32.WriteFile(self._port_handle, data, len(data), ctypes.byref(n), self._overlapped_write)
            if self._write_timeout != 0:  # if blocking (None) or w/ write timeout (>0)
                if not success and win32.GetLastError() not in (win32.ERROR_SUCCESS, win32.ERROR_IO_PENDING):
                    raise SerialException("WriteFile failed ({!r})".format(ctypes.WinError()))

                # Wait for the write to complete.
                #~ win32.WaitForSingleObject(self._overlapped_write.hEvent, win32.INFINITE)
                win32.GetOverlappedResult(self._port_handle, self._overlapped_write, ctypes.byref(n), True)
                if win32.GetLastError() == win32.ERROR_OPERATION_ABORTED:
                    return n.value  # canceled IO is no error
                if n.value != len(data):
                    raise writeTimeoutError
                return n.value
            else:
                errorcode = win32.ERROR_SUCCESS if success else win32.GetLastError()
                if errorcode in (win32.ERROR_INVALID_USER_BUFFER, win32.ERROR_NOT_ENOUGH_MEMORY,
                                 win32.ERROR_OPERATION_ABORTED):
                    return 0
                elif errorcode in (win32.ERROR_SUCCESS, win32.ERROR_IO_PENDING):
                    # no info on true length provided by OS function in async mode
                    return len(data)
                else:
                    raise SerialException("WriteFile failed ({!r})".format(ctypes.WinError()))
        else:
            return 0

    def flush(self):
        """\
        Flush of file like objects. In this case, wait until all data
        is written.
        """
        while self.out_waiting:
            time.sleep(0.05)
        # XXX could also use WaitCommEvent with mask EV_TXEMPTY, but it would
        # require overlapped IO and it's also only possible to set a single mask
        # on the port---

    def reset_input_buffer(self):
        """Clear input buffer, discarding all that is in the buffer."""
        if not self.is_open:
            raise portNotOpenError
        win32.PurgeComm(self._port_handle, win32.PURGE_RXCLEAR | win32.PURGE_RXABORT)

    def reset_output_buffer(self):
        """\
        Clear output buffer, aborting the current output and discarding all
        that is in the buffer.
        """
        if not self.is_open:
            raise portNotOpenError
        win32.PurgeComm(self._port_handle, win32.PURGE_TXCLEAR | win32.PURGE_TXABORT)

    def _update_break_state(self):
        """Set break: Controls TXD. When active, to transmitting is possible."""
        if not self.is_open:
            raise portNotOpenError
        if self._break_state:
            win32.SetCommBreak(self._port_handle)
        else:
            win32.ClearCommBreak(self._port_handle)

    def _update_rts_state(self):
        """Set terminal status line: Request To Send"""
        if self._rts_state:
            win32.EscapeCommFunction(self._port_handle, win32.SETRTS)
        else:
            win32.EscapeCommFunction(self._port_handle, win32.CLRRTS)

    def _update_dtr_state(self):
        """Set terminal status line: Data Terminal Ready"""
        if self._dtr_state:
            win32.EscapeCommFunction(self._port_handle, win32.SETDTR)
        else:
            win32.EscapeCommFunction(self._port_handle, win32.CLRDTR)

    def _GetCommModemStatus(self):
        # Query the modem status bits (CTS/DSR/RI/CD) as a raw DWORD value.
        if not self.is_open:
            raise portNotOpenError
        stat = win32.DWORD()
        win32.GetCommModemStatus(self._port_handle, ctypes.byref(stat))
        return stat.value

    @property
    def cts(self):
        """Read terminal status line: Clear To Send"""
        return win32.MS_CTS_ON & self._GetCommModemStatus() != 0

    @property
    def dsr(self):
        """Read terminal status line: Data Set Ready"""
        return win32.MS_DSR_ON & self._GetCommModemStatus() != 0

    @property
    def ri(self):
        """Read terminal status line: Ring Indicator"""
        return win32.MS_RING_ON & self._GetCommModemStatus() != 0

    @property
    def cd(self):
        """Read terminal status line: Carrier Detect"""
        return win32.MS_RLSD_ON & self._GetCommModemStatus() != 0

    # - - platform specific - - - -

    def set_buffer_size(self, rx_size=4096, tx_size=None):
        """\
        Recommend a buffer size to the driver (device driver can ignore this
        value). Must be called before the port is opened.
        """
        if tx_size is None:
            tx_size = rx_size
        win32.SetupComm(self._port_handle, rx_size, tx_size)

    def set_output_flow_control(self, enable=True):
        """\
        Manually control flow - when software flow control is enabled.
        This will do the same as if XON (true) or XOFF (false) are received
        from the other device and control the transmission accordingly.
        WARNING: this function is not portable to different platforms!
        """
        if not self.is_open:
            raise portNotOpenError
        if enable:
            win32.EscapeCommFunction(self._port_handle, win32.SETXON)
        else:
            win32.EscapeCommFunction(self._port_handle, win32.SETXOFF)

    @property
    def out_waiting(self):
        """Return how many bytes the in the outgoing buffer"""
        flags = win32.DWORD()
        comstat = win32.COMSTAT()
        if not win32.ClearCommError(self._port_handle, ctypes.byref(flags), ctypes.byref(comstat)):
            raise SerialException("ClearCommError failed ({!r})".format(ctypes.WinError()))
        return comstat.cbOutQue

    def _cancel_overlapped_io(self, overlapped):
        """Cancel a blocking read operation, may be called from other thread"""
        # check if read operation is pending
        rc = win32.DWORD()
        err = win32.GetOverlappedResult(
            self._port_handle,
            ctypes.byref(overlapped),
            ctypes.byref(rc),
            False)
        if not err and win32.GetLastError() in (win32.ERROR_IO_PENDING, win32.ERROR_IO_INCOMPLETE):
            # cancel, ignoring any errors (e.g. it may just have finished on its own)
            win32.CancelIoEx(self._port_handle, overlapped)

    def cancel_read(self):
        """Cancel a blocking read operation, may be called from other thread"""
        self._cancel_overlapped_io(self._overlapped_read)

    def cancel_write(self):
        """Cancel a blocking write operation, may be called from other thread"""
        self._cancel_overlapped_io(self._overlapped_write)

    @SerialBase.exclusive.setter
    def exclusive(self, exclusive):
        """Change the exclusive access setting."""
        # CreateFile is always called with no sharing, so non-exclusive
        # access cannot be granted on win32.
        if exclusive is not None and not exclusive:
            raise ValueError('win32 only supports exclusive access (not: {})'.format(exclusive))
        else:
            serial.SerialBase.exclusive.__set__(self, exclusive)
|
||||
354
software/tools/pymcuprog/libs/serial/win32.py
Normal file
354
software/tools/pymcuprog/libs/serial/win32.py
Normal file
@@ -0,0 +1,354 @@
|
||||
#! python
|
||||
#
|
||||
# Constants and types for use with Windows API, used by serialwin32.py
|
||||
#
|
||||
# This file is part of pySerial. https://github.com/pyserial/pyserial
|
||||
# (C) 2001-2015 Chris Liechti <cliechti@gmx.net>
|
||||
#
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
# pylint: disable=invalid-name,too-few-public-methods,protected-access,too-many-instance-attributes
|
||||
|
||||
from ctypes import c_ulong, c_void_p, c_int64, c_char, \
|
||||
WinDLL, sizeof, Structure, Union, POINTER
|
||||
from ctypes.wintypes import HANDLE
|
||||
from ctypes.wintypes import BOOL
|
||||
from ctypes.wintypes import LPCWSTR
|
||||
from ctypes.wintypes import DWORD
|
||||
from ctypes.wintypes import WORD
|
||||
from ctypes.wintypes import BYTE
|
||||
_stdcall_libraries = {}
|
||||
_stdcall_libraries['kernel32'] = WinDLL('kernel32')
|
||||
|
||||
INVALID_HANDLE_VALUE = HANDLE(-1).value
|
||||
|
||||
|
||||
# some details of the windows API differ between 32 and 64 bit systems..
|
||||
def is_64bit():
    """Return True when pointers are wider than a C ``unsigned long``.

    On 64-bit Windows ``c_void_p`` is 8 bytes while ``c_ulong`` remains 4,
    so comparing the two sizes detects a 64-bit system.
    """
    pointer_size = sizeof(c_void_p)
    ulong_size = sizeof(c_ulong)
    return ulong_size != pointer_size
|
||||
|
||||
# ULONG_PTR is a an ordinary number, not a pointer and contrary to the name it
|
||||
# is either 32 or 64 bits, depending on the type of windows...
|
||||
# so test if this a 32 bit windows...
|
||||
# ULONG_PTR is an ordinary number, not a pointer, and contrary to the name
# it is either 32 or 64 bits depending on the type of windows.
ULONG_PTR = c_int64 if is_64bit() else c_ulong
|
||||
|
||||
|
||||
class _SECURITY_ATTRIBUTES(Structure):
|
||||
pass
|
||||
LPSECURITY_ATTRIBUTES = POINTER(_SECURITY_ATTRIBUTES)
|
||||
|
||||
|
||||
# Prefer the wide-char (Unicode) kernel32 entry points; fall back to the
# ANSI variants on very old Windows versions that lack the W functions.
try:
    CreateEventW = _stdcall_libraries['kernel32'].CreateEventW
except AttributeError:
    # Fallback to non wide char version for old OS...
    from ctypes.wintypes import LPCSTR
    CreateEventA = _stdcall_libraries['kernel32'].CreateEventA
    CreateEventA.restype = HANDLE
    CreateEventA.argtypes = [LPSECURITY_ATTRIBUTES, BOOL, BOOL, LPCSTR]
    CreateEvent = CreateEventA

    CreateFileA = _stdcall_libraries['kernel32'].CreateFileA
    CreateFileA.restype = HANDLE
    CreateFileA.argtypes = [LPCSTR, DWORD, DWORD, LPSECURITY_ATTRIBUTES, DWORD, DWORD, HANDLE]
    CreateFile = CreateFileA
else:
    CreateEventW.restype = HANDLE
    CreateEventW.argtypes = [LPSECURITY_ATTRIBUTES, BOOL, BOOL, LPCWSTR]
    CreateEvent = CreateEventW  # alias

    CreateFileW = _stdcall_libraries['kernel32'].CreateFileW
    CreateFileW.restype = HANDLE
    CreateFileW.argtypes = [LPCWSTR, DWORD, DWORD, LPSECURITY_ATTRIBUTES, DWORD, DWORD, HANDLE]
    CreateFile = CreateFileW  # alias
|
||||
|
||||
|
||||
# Forward declarations for the Win32 structures used by the serial API.
# Their _fields_ are filled in at the end of this module because they
# depend on types (ULONG_PTR, the OVERLAPPED union, ...) defined in between.

class _OVERLAPPED(Structure):
    pass

OVERLAPPED = _OVERLAPPED


class _COMSTAT(Structure):
    # Serial-port status bit-field structure (CTS/DSR holds, queue sizes).
    pass

COMSTAT = _COMSTAT


class _DCB(Structure):
    # Device Control Block: baud rate, framing and flow-control settings.
    pass

DCB = _DCB


class _COMMTIMEOUTS(Structure):
    # Read/write timeout configuration for a comm device.
    pass

COMMTIMEOUTS = _COMMTIMEOUTS
|
||||
|
||||
# --- kernel32 function prototypes ----------------------------------------
# Each entry binds a kernel32 export and declares its C signature so that
# ctypes converts arguments and return values correctly.

GetLastError = _stdcall_libraries['kernel32'].GetLastError
GetLastError.restype = DWORD
GetLastError.argtypes = []

LPOVERLAPPED = POINTER(_OVERLAPPED)
LPDWORD = POINTER(DWORD)

GetOverlappedResult = _stdcall_libraries['kernel32'].GetOverlappedResult
GetOverlappedResult.restype = BOOL
GetOverlappedResult.argtypes = [HANDLE, LPOVERLAPPED, LPDWORD, BOOL]

ResetEvent = _stdcall_libraries['kernel32'].ResetEvent
ResetEvent.restype = BOOL
ResetEvent.argtypes = [HANDLE]

LPCVOID = c_void_p

WriteFile = _stdcall_libraries['kernel32'].WriteFile
WriteFile.restype = BOOL
WriteFile.argtypes = [HANDLE, LPCVOID, DWORD, LPDWORD, LPOVERLAPPED]

LPVOID = c_void_p

ReadFile = _stdcall_libraries['kernel32'].ReadFile
ReadFile.restype = BOOL
ReadFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPOVERLAPPED]

CloseHandle = _stdcall_libraries['kernel32'].CloseHandle
CloseHandle.restype = BOOL
CloseHandle.argtypes = [HANDLE]

ClearCommBreak = _stdcall_libraries['kernel32'].ClearCommBreak
ClearCommBreak.restype = BOOL
ClearCommBreak.argtypes = [HANDLE]

LPCOMSTAT = POINTER(_COMSTAT)

ClearCommError = _stdcall_libraries['kernel32'].ClearCommError
ClearCommError.restype = BOOL
ClearCommError.argtypes = [HANDLE, LPDWORD, LPCOMSTAT]

SetupComm = _stdcall_libraries['kernel32'].SetupComm
SetupComm.restype = BOOL
SetupComm.argtypes = [HANDLE, DWORD, DWORD]

EscapeCommFunction = _stdcall_libraries['kernel32'].EscapeCommFunction
EscapeCommFunction.restype = BOOL
EscapeCommFunction.argtypes = [HANDLE, DWORD]

GetCommModemStatus = _stdcall_libraries['kernel32'].GetCommModemStatus
GetCommModemStatus.restype = BOOL
GetCommModemStatus.argtypes = [HANDLE, LPDWORD]

LPDCB = POINTER(_DCB)

GetCommState = _stdcall_libraries['kernel32'].GetCommState
GetCommState.restype = BOOL
GetCommState.argtypes = [HANDLE, LPDCB]

LPCOMMTIMEOUTS = POINTER(_COMMTIMEOUTS)

GetCommTimeouts = _stdcall_libraries['kernel32'].GetCommTimeouts
GetCommTimeouts.restype = BOOL
GetCommTimeouts.argtypes = [HANDLE, LPCOMMTIMEOUTS]

PurgeComm = _stdcall_libraries['kernel32'].PurgeComm
PurgeComm.restype = BOOL
PurgeComm.argtypes = [HANDLE, DWORD]

SetCommBreak = _stdcall_libraries['kernel32'].SetCommBreak
SetCommBreak.restype = BOOL
SetCommBreak.argtypes = [HANDLE]

SetCommMask = _stdcall_libraries['kernel32'].SetCommMask
SetCommMask.restype = BOOL
SetCommMask.argtypes = [HANDLE, DWORD]

SetCommState = _stdcall_libraries['kernel32'].SetCommState
SetCommState.restype = BOOL
SetCommState.argtypes = [HANDLE, LPDCB]

SetCommTimeouts = _stdcall_libraries['kernel32'].SetCommTimeouts
SetCommTimeouts.restype = BOOL
SetCommTimeouts.argtypes = [HANDLE, LPCOMMTIMEOUTS]

WaitForSingleObject = _stdcall_libraries['kernel32'].WaitForSingleObject
WaitForSingleObject.restype = DWORD
WaitForSingleObject.argtypes = [HANDLE, DWORD]

# NOTE(review): CancelIoEx is only available on Vista and later — confirm
# the supported OS baseline before relying on it unconditionally.
CancelIoEx = _stdcall_libraries['kernel32'].CancelIoEx
CancelIoEx.restype = BOOL
CancelIoEx.argtypes = [HANDLE, LPOVERLAPPED]
|
||||
|
||||
# -- Win32 serial-port constants -------------------------------------------
# Values mirror the Windows SDK headers; hex notation is used where the
# value is a bit mask or flag.

# Stop bits (DCB.StopBits)
ONESTOPBIT = 0
ONE5STOPBITS = 1
TWOSTOPBITS = 2

# Parity (DCB.Parity)
NOPARITY = 0
ODDPARITY = 1
EVENPARITY = 2
MARKPARITY = 3
SPACEPARITY = 4

# RTS line control (DCB.fRtsControl) and EscapeCommFunction codes
RTS_CONTROL_DISABLE = 0
RTS_CONTROL_ENABLE = 1
RTS_CONTROL_HANDSHAKE = 2
RTS_CONTROL_TOGGLE = 3
SETRTS = 3
CLRRTS = 4

# DTR line control (DCB.fDtrControl) and EscapeCommFunction codes
DTR_CONTROL_DISABLE = 0
DTR_CONTROL_ENABLE = 1
DTR_CONTROL_HANDSHAKE = 2
SETDTR = 5
CLRDTR = 6

# Modem status bits (GetCommModemStatus)
MS_CTS_ON = 0x0010
MS_DSR_ON = 0x0020
MS_RING_ON = 0x0040
MS_RLSD_ON = 0x0080

# Comm event mask bits (SetCommMask / WaitCommEvent)
EV_RXCHAR = 0x0001
EV_RXFLAG = 0x0002
EV_TXEMPTY = 0x0004
EV_CTS = 0x0008
EV_DSR = 0x0010
EV_RLSD = 0x0020
EV_BREAK = 0x0040
EV_ERR = 0x0080
EV_RING = 0x0100
EV_PERR = 0x0200
EV_RX80FULL = 0x0400
EV_EVENT1 = 0x0800
EV_EVENT2 = 0x1000

# EscapeCommFunction software flow-control codes
SETXOFF = 1
SETXON = 2

# PurgeComm flags
PURGE_TXABORT = 0x0001
PURGE_RXABORT = 0x0002
PURGE_TXCLEAR = 0x0004
PURGE_RXCLEAR = 0x0008

# CreateFile access / disposition / attribute flags
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
OPEN_EXISTING = 3
FILE_ATTRIBUTE_NORMAL = 0x00000080
FILE_FLAG_OVERLAPPED = 0x40000000

# GetLastError codes relevant to overlapped I/O
ERROR_SUCCESS = 0
ERROR_NOT_ENOUGH_MEMORY = 8
ERROR_OPERATION_ABORTED = 995
ERROR_IO_INCOMPLETE = 996
ERROR_IO_PENDING = 997
ERROR_INVALID_USER_BUFFER = 1784

MAXDWORD = 0xFFFFFFFF
INFINITE = 0xFFFFFFFF
|
||||
|
||||
|
||||
# The OVERLAPPED structure contains an anonymous union of either
# (Offset, OffsetHigh) or a raw Pointer; the generated names below come
# from the original ctypes code generator.

class N11_OVERLAPPED4DOLLAR_48E(Union):
    pass


class N11_OVERLAPPED4DOLLAR_484DOLLAR_49E(Structure):
    # The (Offset, OffsetHigh) half of the union above.
    pass


N11_OVERLAPPED4DOLLAR_484DOLLAR_49E._fields_ = [
    ('Offset', DWORD),
    ('OffsetHigh', DWORD),
]

PVOID = c_void_p

# '_0' is anonymous so Offset/OffsetHigh are reachable directly on the union.
N11_OVERLAPPED4DOLLAR_48E._anonymous_ = ['_0']
N11_OVERLAPPED4DOLLAR_48E._fields_ = [
    ('_0', N11_OVERLAPPED4DOLLAR_484DOLLAR_49E),
    ('Pointer', PVOID),
]

_OVERLAPPED._anonymous_ = ['_0']
_OVERLAPPED._fields_ = [
    ('Internal', ULONG_PTR),
    ('InternalHigh', ULONG_PTR),
    ('_0', N11_OVERLAPPED4DOLLAR_48E),
    ('hEvent', HANDLE),
]

_SECURITY_ATTRIBUTES._fields_ = [
    ('nLength', DWORD),
    ('lpSecurityDescriptor', LPVOID),
    ('bInheritHandle', BOOL),
]

# COMSTAT: 1-bit status flags packed into a DWORD, plus the queue counters.
_COMSTAT._fields_ = [
    ('fCtsHold', DWORD, 1),
    ('fDsrHold', DWORD, 1),
    ('fRlsdHold', DWORD, 1),
    ('fXoffHold', DWORD, 1),
    ('fXoffSent', DWORD, 1),
    ('fEof', DWORD, 1),
    ('fTxim', DWORD, 1),
    ('fReserved', DWORD, 25),
    ('cbInQue', DWORD),
    ('cbOutQue', DWORD),
]

# DCB: serial line settings (baud rate, framing and flow-control bits).
_DCB._fields_ = [
    ('DCBlength', DWORD),
    ('BaudRate', DWORD),
    ('fBinary', DWORD, 1),
    ('fParity', DWORD, 1),
    ('fOutxCtsFlow', DWORD, 1),
    ('fOutxDsrFlow', DWORD, 1),
    ('fDtrControl', DWORD, 2),
    ('fDsrSensitivity', DWORD, 1),
    ('fTXContinueOnXoff', DWORD, 1),
    ('fOutX', DWORD, 1),
    ('fInX', DWORD, 1),
    ('fErrorChar', DWORD, 1),
    ('fNull', DWORD, 1),
    ('fRtsControl', DWORD, 2),
    ('fAbortOnError', DWORD, 1),
    ('fDummy2', DWORD, 17),
    ('wReserved', WORD),
    ('XonLim', WORD),
    ('XoffLim', WORD),
    ('ByteSize', BYTE),
    ('Parity', BYTE),
    ('StopBits', BYTE),
    ('XonChar', c_char),
    ('XoffChar', c_char),
    ('ErrorChar', c_char),
    ('EofChar', c_char),
    ('EvtChar', c_char),
    ('wReserved1', WORD),
]

_COMMTIMEOUTS._fields_ = [
    ('ReadIntervalTimeout', DWORD),
    ('ReadTotalTimeoutMultiplier', DWORD),
    ('ReadTotalTimeoutConstant', DWORD),
    ('WriteTotalTimeoutMultiplier', DWORD),
    ('WriteTotalTimeoutConstant', DWORD),
]
|
||||
# Public re-export list for star imports of this module.
# NOTE(review): CreateEventW/CreateFileW are listed but do not exist when
# the ANSI fallback path was taken above — confirm on the oldest supported OS.
__all__ = ['GetLastError', 'MS_CTS_ON', 'FILE_ATTRIBUTE_NORMAL',
           'DTR_CONTROL_ENABLE', '_COMSTAT', 'MS_RLSD_ON',
           'GetOverlappedResult', 'SETXON', 'PURGE_TXABORT',
           'PurgeComm', 'N11_OVERLAPPED4DOLLAR_48E', 'EV_RING',
           'ONESTOPBIT', 'SETXOFF', 'PURGE_RXABORT', 'GetCommState',
           'RTS_CONTROL_ENABLE', '_DCB', 'CreateEvent',
           '_COMMTIMEOUTS', '_SECURITY_ATTRIBUTES', 'EV_DSR',
           'EV_PERR', 'EV_RXFLAG', 'OPEN_EXISTING', 'DCB',
           'FILE_FLAG_OVERLAPPED', 'EV_CTS', 'SetupComm',
           'LPOVERLAPPED', 'EV_TXEMPTY', 'ClearCommBreak',
           'LPSECURITY_ATTRIBUTES', 'SetCommBreak', 'SetCommTimeouts',
           'COMMTIMEOUTS', 'ODDPARITY', 'EV_RLSD',
           'GetCommModemStatus', 'EV_EVENT2', 'PURGE_TXCLEAR',
           'EV_BREAK', 'EVENPARITY', 'LPCVOID', 'COMSTAT', 'ReadFile',
           'PVOID', '_OVERLAPPED', 'WriteFile', 'GetCommTimeouts',
           'ResetEvent', 'EV_RXCHAR', 'LPCOMSTAT', 'ClearCommError',
           'ERROR_IO_PENDING', 'EscapeCommFunction', 'GENERIC_READ',
           'RTS_CONTROL_HANDSHAKE', 'OVERLAPPED',
           'DTR_CONTROL_HANDSHAKE', 'PURGE_RXCLEAR', 'GENERIC_WRITE',
           'LPDCB', 'CreateEventW', 'SetCommMask', 'EV_EVENT1',
           'SetCommState', 'LPVOID', 'CreateFileW', 'LPDWORD',
           'EV_RX80FULL', 'TWOSTOPBITS', 'LPCOMMTIMEOUTS', 'MAXDWORD',
           'MS_DSR_ON', 'MS_RING_ON',
           'N11_OVERLAPPED4DOLLAR_484DOLLAR_49E', 'EV_ERR',
           'ULONG_PTR', 'CreateFile', 'NOPARITY', 'CloseHandle']
|
||||
312
software/tools/pymcuprog/libs/yaml/__init__.py
Normal file
312
software/tools/pymcuprog/libs/yaml/__init__.py
Normal file
@@ -0,0 +1,312 @@
|
||||
|
||||
from .error import *

from .tokens import *
from .events import *
from .nodes import *

from .loader import *
from .dumper import *

__version__ = '3.13'
# Use the libyaml C extension (CLoader/CDumper) when it is available;
# __with_libyaml__ lets callers detect which implementation is active.
try:
    from .cyaml import *
    __with_libyaml__ = True
except ImportError:
    __with_libyaml__ = False

import io
|
||||
|
||||
def scan(stream, Loader=Loader):
    """
    Scan a YAML stream and produce scanning tokens.
    """
    active = Loader(stream)
    try:
        while active.check_token():
            yield active.get_token()
    finally:
        # Release parser state even when the generator is closed early.
        active.dispose()


def parse(stream, Loader=Loader):
    """
    Parse a YAML stream and produce parsing events.
    """
    active = Loader(stream)
    try:
        while active.check_event():
            yield active.get_event()
    finally:
        active.dispose()


def compose(stream, Loader=Loader):
    """
    Parse the first YAML document in a stream
    and produce the corresponding representation tree.
    """
    active = Loader(stream)
    try:
        return active.get_single_node()
    finally:
        active.dispose()


def compose_all(stream, Loader=Loader):
    """
    Parse all YAML documents in a stream
    and produce corresponding representation trees.
    """
    active = Loader(stream)
    try:
        while active.check_node():
            yield active.get_node()
    finally:
        active.dispose()
|
||||
|
||||
def load(stream, Loader=Loader):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.

    NOTE: with the default Loader this can construct arbitrary Python
    objects — never call it on untrusted input; use safe_load() instead.
    """
    active = Loader(stream)
    try:
        return active.get_single_data()
    finally:
        active.dispose()


def load_all(stream, Loader=Loader):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.
    """
    active = Loader(stream)
    try:
        while active.check_data():
            yield active.get_data()
    finally:
        active.dispose()


def safe_load(stream):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.
    Resolve only basic YAML tags.
    """
    return load(stream, SafeLoader)


def safe_load_all(stream):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.
    Resolve only basic YAML tags.
    """
    return load_all(stream, SafeLoader)
|
||||
|
||||
def emit(events, stream=None, Dumper=Dumper,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None):
    """
    Emit YAML parsing events into a stream.
    If stream is None, return the produced string instead.
    """
    capture = None
    if stream is None:
        stream = io.StringIO()
        capture = stream.getvalue
    emitter = Dumper(stream, canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break)
    try:
        for event in events:
            emitter.emit(event)
    finally:
        emitter.dispose()
    if capture:
        return capture()


def serialize_all(nodes, stream=None, Dumper=Dumper,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding=None, explicit_start=None, explicit_end=None,
        version=None, tags=None):
    """
    Serialize a sequence of representation trees into a YAML stream.
    If stream is None, return the produced string instead.
    """
    capture = None
    if stream is None:
        # Text output unless an explicit byte encoding was requested.
        stream = io.StringIO() if encoding is None else io.BytesIO()
        capture = stream.getvalue
    serializer = Dumper(stream, canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end)
    try:
        serializer.open()
        for node in nodes:
            serializer.serialize(node)
        serializer.close()
    finally:
        serializer.dispose()
    if capture:
        return capture()


def serialize(node, stream=None, Dumper=Dumper, **kwds):
    """
    Serialize a representation tree into a YAML stream.
    If stream is None, return the produced string instead.
    """
    return serialize_all([node], stream, Dumper=Dumper, **kwds)
|
||||
|
||||
def dump_all(documents, stream=None, Dumper=Dumper,
        default_style=None, default_flow_style=None,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding=None, explicit_start=None, explicit_end=None,
        version=None, tags=None):
    """
    Serialize a sequence of Python objects into a YAML stream.
    If stream is None, return the produced string instead.
    """
    capture = None
    if stream is None:
        # Text output unless an explicit byte encoding was requested.
        stream = io.StringIO() if encoding is None else io.BytesIO()
        capture = stream.getvalue
    writer = Dumper(stream, default_style=default_style,
            default_flow_style=default_flow_style,
            canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end)
    try:
        writer.open()
        for document in documents:
            writer.represent(document)
        writer.close()
    finally:
        writer.dispose()
    if capture:
        return capture()


def dump(data, stream=None, Dumper=Dumper, **kwds):
    """
    Serialize a Python object into a YAML stream.
    If stream is None, return the produced string instead.
    """
    return dump_all([data], stream, Dumper=Dumper, **kwds)


def safe_dump_all(documents, stream=None, **kwds):
    """
    Serialize a sequence of Python objects into a YAML stream.
    Produce only basic YAML tags.
    If stream is None, return the produced string instead.
    """
    return dump_all(documents, stream, Dumper=SafeDumper, **kwds)


def safe_dump(data, stream=None, **kwds):
    """
    Serialize a Python object into a YAML stream.
    Produce only basic YAML tags.
    If stream is None, return the produced string instead.
    """
    return dump_all([data], stream, Dumper=SafeDumper, **kwds)
|
||||
|
||||
def add_implicit_resolver(tag, regexp, first=None,
        Loader=Loader, Dumper=Dumper):
    """
    Add an implicit scalar detector.
    If an implicit scalar value matches the given regexp,
    the corresponding tag is assigned to the scalar.
    first is a sequence of possible initial characters or None.
    """
    # Register on both sides so loading and dumping stay symmetric.
    for target in (Loader, Dumper):
        target.add_implicit_resolver(tag, regexp, first)


def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
    """
    Add a path based resolver for the given tag.
    A path is a list of keys that forms a path
    to a node in the representation tree.
    Keys can be string values, integers, or None.
    """
    for target in (Loader, Dumper):
        target.add_path_resolver(tag, path, kind)


def add_constructor(tag, constructor, Loader=Loader):
    """
    Add a constructor for the given tag.
    Constructor is a function that accepts a Loader instance
    and a node object and produces the corresponding Python object.
    """
    Loader.add_constructor(tag, constructor)


def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
    """
    Add a multi-constructor for the given tag prefix.
    Multi-constructor is called for a node if its tag starts with tag_prefix.
    Multi-constructor accepts a Loader instance, a tag suffix,
    and a node object and produces the corresponding Python object.
    """
    Loader.add_multi_constructor(tag_prefix, multi_constructor)


def add_representer(data_type, representer, Dumper=Dumper):
    """
    Add a representer for the given type.
    Representer is a function accepting a Dumper instance
    and an instance of the given data type
    and producing the corresponding representation node.
    """
    Dumper.add_representer(data_type, representer)


def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
    """
    Add a multi-representer for the given type.
    Multi-representer is a function accepting a Dumper instance
    and an instance of the given data type or subtype
    and producing the corresponding representation node.
    """
    Dumper.add_multi_representer(data_type, multi_representer)
|
||||
|
||||
class YAMLObjectMetaclass(type):
    """
    The metaclass for YAMLObject.
    """
    def __init__(cls, name, bases, kwds):
        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
        # Auto-register the new class with its loader/dumper whenever it
        # declares a concrete (non-None) yaml_tag in its own class body.
        if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
            cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
            cls.yaml_dumper.add_representer(cls, cls.to_yaml)


class YAMLObject(metaclass=YAMLObjectMetaclass):
    """
    An object that can dump itself to a YAML stream
    and load itself from a YAML stream.
    """

    __slots__ = ()  # no direct instantiation, so allow immutable subclasses

    # Loader/Dumper classes this type registers itself with; subclasses may
    # override these to target e.g. SafeLoader/SafeDumper.
    yaml_loader = Loader
    yaml_dumper = Dumper

    # Subclasses set yaml_tag to a tag string to enable auto-registration.
    yaml_tag = None
    yaml_flow_style = None

    @classmethod
    def from_yaml(cls, loader, node):
        """
        Convert a representation node to a Python object.
        """
        return loader.construct_yaml_object(node, cls)

    @classmethod
    def to_yaml(cls, dumper, data):
        """
        Convert a Python object to a representation node.
        """
        return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
                flow_style=cls.yaml_flow_style)
|
||||
|
||||
139
software/tools/pymcuprog/libs/yaml/composer.py
Normal file
139
software/tools/pymcuprog/libs/yaml/composer.py
Normal file
@@ -0,0 +1,139 @@
|
||||
|
||||
__all__ = ['Composer', 'ComposerError']

from .error import MarkedYAMLError
from .events import *
from .nodes import *


class ComposerError(MarkedYAMLError):
    # Raised when the event stream cannot be composed into a node tree
    # (undefined alias, duplicate anchor, more than one document, ...).
    pass
|
||||
|
||||
class Composer:
    """Mixin that composes parser events into a tree of representation nodes.

    Expects the parser/resolver API (check_event, get_event, peek_event,
    descend_resolver, ascend_resolver, resolve) to be provided by sibling
    mixins of the Loader class.
    """

    def __init__(self):
        # anchor name -> already composed node, for resolving aliases.
        self.anchors = {}

    def check_node(self):
        # Drop the STREAM-START event.
        if self.check_event(StreamStartEvent):
            self.get_event()

        # If there are more documents available?
        return not self.check_event(StreamEndEvent)

    def get_node(self):
        # Get the root node of the next document.
        if not self.check_event(StreamEndEvent):
            return self.compose_document()

    def get_single_node(self):
        # Drop the STREAM-START event.
        self.get_event()

        # Compose a document if the stream is not empty.
        document = None
        if not self.check_event(StreamEndEvent):
            document = self.compose_document()

        # Ensure that the stream contains no more documents.
        if not self.check_event(StreamEndEvent):
            event = self.get_event()
            raise ComposerError("expected a single document in the stream",
                    document.start_mark, "but found another document",
                    event.start_mark)

        # Drop the STREAM-END event.
        self.get_event()

        return document

    def compose_document(self):
        # Drop the DOCUMENT-START event.
        self.get_event()

        # Compose the root node.
        node = self.compose_node(None, None)

        # Drop the DOCUMENT-END event.
        self.get_event()

        # Anchors are document-scoped, so reset them between documents.
        self.anchors = {}
        return node

    def compose_node(self, parent, index):
        # An alias refers back to a node already composed under its anchor.
        if self.check_event(AliasEvent):
            event = self.get_event()
            anchor = event.anchor
            if anchor not in self.anchors:
                raise ComposerError(None, None, "found undefined alias %r"
                        % anchor, event.start_mark)
            return self.anchors[anchor]
        event = self.peek_event()
        anchor = event.anchor
        if anchor is not None:
            if anchor in self.anchors:
                raise ComposerError("found duplicate anchor %r; first occurence"
                        % anchor, self.anchors[anchor].start_mark,
                        "second occurence", event.start_mark)
        # Let the resolver know where we are in the tree, then dispatch on
        # the upcoming event type.
        self.descend_resolver(parent, index)
        if self.check_event(ScalarEvent):
            node = self.compose_scalar_node(anchor)
        elif self.check_event(SequenceStartEvent):
            node = self.compose_sequence_node(anchor)
        elif self.check_event(MappingStartEvent):
            node = self.compose_mapping_node(anchor)
        self.ascend_resolver()
        return node

    def compose_scalar_node(self, anchor):
        event = self.get_event()
        tag = event.tag
        # '!' or missing tag means: resolve the tag from the value.
        if tag is None or tag == '!':
            tag = self.resolve(ScalarNode, event.value, event.implicit)
        node = ScalarNode(tag, event.value,
                event.start_mark, event.end_mark, style=event.style)
        if anchor is not None:
            self.anchors[anchor] = node
        return node

    def compose_sequence_node(self, anchor):
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == '!':
            tag = self.resolve(SequenceNode, None, start_event.implicit)
        node = SequenceNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        # Register the anchor before composing children so self-references
        # inside the sequence can resolve.
        if anchor is not None:
            self.anchors[anchor] = node
        index = 0
        while not self.check_event(SequenceEndEvent):
            node.value.append(self.compose_node(node, index))
            index += 1
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node

    def compose_mapping_node(self, anchor):
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == '!':
            tag = self.resolve(MappingNode, None, start_event.implicit)
        node = MappingNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        if anchor is not None:
            self.anchors[anchor] = node
        while not self.check_event(MappingEndEvent):
            #key_event = self.peek_event()
            item_key = self.compose_node(node, None)
            #if item_key in node.value:
            #    raise ComposerError("while composing a mapping", start_event.start_mark,
            #            "found duplicate key", key_event.start_mark)
            item_value = self.compose_node(node, item_key)
            #node.value[item_key] = item_value
            # Values are kept as (key, value) pairs; duplicates are allowed here.
            node.value.append((item_key, item_value))
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node
|
||||
|
||||
686
software/tools/pymcuprog/libs/yaml/constructor.py
Normal file
686
software/tools/pymcuprog/libs/yaml/constructor.py
Normal file
@@ -0,0 +1,686 @@
|
||||
|
||||
__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
|
||||
'ConstructorError']
|
||||
|
||||
from .error import *
|
||||
from .nodes import *
|
||||
|
||||
import base64
import binascii
import collections
import collections.abc
import datetime
import re
import sys
import types
|
||||
|
||||
class ConstructorError(MarkedYAMLError):
    # Raised when a node tree cannot be turned into Python objects
    # (unknown tag, wrong node kind, unhashable mapping key, ...).
    pass
|
||||
|
||||
class BaseConstructor:
    """Turn a tree of representation nodes into native Python objects.

    Constructors are looked up by node tag: first an exact match in
    ``yaml_constructors``, then a prefix match in ``yaml_multi_constructors``.
    Subclasses register handlers via :meth:`add_constructor` and
    :meth:`add_multi_constructor`.  The node-tree access methods
    (check_node, get_node, get_single_node) come from the Composer mixin.
    """

    # Class-level registries; copied on first write into a subclass so that
    # registrations on a subclass never leak into the parent Loader class.
    yaml_constructors = {}
    yaml_multi_constructors = {}

    def __init__(self):
        self.constructed_objects = {}   # node -> finished Python object
        self.recursive_objects = {}     # nodes currently under construction
        self.state_generators = []      # pending two-phase (generator) constructors
        self.deep_construct = False     # construct children eagerly when True

    def check_data(self):
        # If there are more documents available?
        return self.check_node()

    def get_data(self):
        # Construct and return the next document.
        if self.check_node():
            return self.construct_document(self.get_node())

    def get_single_data(self):
        # Ensure that the stream contains a single document and construct it.
        node = self.get_single_node()
        if node is not None:
            return self.construct_document(node)
        return None

    def construct_document(self, node):
        """Construct one whole document rooted at *node*."""
        data = self.construct_object(node)
        # Drain generator-based constructors; running one may queue more.
        while self.state_generators:
            state_generators = self.state_generators
            self.state_generators = []
            for generator in state_generators:
                for dummy in generator:
                    pass
        self.constructed_objects = {}
        self.recursive_objects = {}
        self.deep_construct = False
        return data

    def construct_object(self, node, deep=False):
        """Construct the Python object for a single *node*.

        With deep=True, children produced by generator constructors are
        fully realized before returning.
        """
        if node in self.constructed_objects:
            return self.constructed_objects[node]
        if deep:
            old_deep = self.deep_construct
            self.deep_construct = True
        if node in self.recursive_objects:
            raise ConstructorError(None, None,
                    "found unconstructable recursive node", node.start_mark)
        self.recursive_objects[node] = None
        # Pick a constructor: exact tag match, then tag-prefix match, then
        # the registered catch-alls, then a kind-based default.
        constructor = None
        tag_suffix = None
        if node.tag in self.yaml_constructors:
            constructor = self.yaml_constructors[node.tag]
        else:
            for tag_prefix in self.yaml_multi_constructors:
                if node.tag.startswith(tag_prefix):
                    tag_suffix = node.tag[len(tag_prefix):]
                    constructor = self.yaml_multi_constructors[tag_prefix]
                    break
            else:
                if None in self.yaml_multi_constructors:
                    tag_suffix = node.tag
                    constructor = self.yaml_multi_constructors[None]
                elif None in self.yaml_constructors:
                    constructor = self.yaml_constructors[None]
                elif isinstance(node, ScalarNode):
                    constructor = self.__class__.construct_scalar
                elif isinstance(node, SequenceNode):
                    constructor = self.__class__.construct_sequence
                elif isinstance(node, MappingNode):
                    constructor = self.__class__.construct_mapping
        if tag_suffix is None:
            data = constructor(self, node)
        else:
            data = constructor(self, tag_suffix, node)
        # A generator constructor yields the (possibly still incomplete)
        # object first and finishes populating it on later iterations.
        if isinstance(data, types.GeneratorType):
            generator = data
            data = next(generator)
            if self.deep_construct:
                for dummy in generator:
                    pass
            else:
                self.state_generators.append(generator)
        self.constructed_objects[node] = data
        del self.recursive_objects[node]
        if deep:
            self.deep_construct = old_deep
        return data

    def construct_scalar(self, node):
        """Return the raw scalar value of *node*; raise on other node kinds."""
        if not isinstance(node, ScalarNode):
            raise ConstructorError(None, None,
                    "expected a scalar node, but found %s" % node.id,
                    node.start_mark)
        return node.value

    def construct_sequence(self, node, deep=False):
        """Construct a list from a sequence node."""
        if not isinstance(node, SequenceNode):
            raise ConstructorError(None, None,
                    "expected a sequence node, but found %s" % node.id,
                    node.start_mark)
        return [self.construct_object(child, deep=deep)
                for child in node.value]

    def construct_mapping(self, node, deep=False):
        """Construct a dict from a mapping node; keys must be hashable."""
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        mapping = {}
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            # Bug fix: collections.Hashable was removed in Python 3.10;
            # the ABC lives in collections.abc.
            if not isinstance(key, collections.abc.Hashable):
                raise ConstructorError("while constructing a mapping", node.start_mark,
                        "found unhashable key", key_node.start_mark)
            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value
        return mapping

    def construct_pairs(self, node, deep=False):
        """Construct a list of (key, value) pairs, preserving order and
        duplicate keys (unlike construct_mapping)."""
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        pairs = []
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            value = self.construct_object(value_node, deep=deep)
            pairs.append((key, value))
        return pairs

    @classmethod
    def add_constructor(cls, tag, constructor):
        """Register *constructor* for exact *tag* matches on this class."""
        # Copy-on-write so registering here does not mutate the parent class.
        if 'yaml_constructors' not in cls.__dict__:
            cls.yaml_constructors = cls.yaml_constructors.copy()
        cls.yaml_constructors[tag] = constructor

    @classmethod
    def add_multi_constructor(cls, tag_prefix, multi_constructor):
        """Register *multi_constructor* for tags starting with *tag_prefix*."""
        if 'yaml_multi_constructors' not in cls.__dict__:
            cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
        cls.yaml_multi_constructors[tag_prefix] = multi_constructor
|
||||
|
||||
class SafeConstructor(BaseConstructor):
    """Constructor resolving the standard YAML 1.1 tags.

    Produces only plain Python objects (None, bool, int, float, str,
    bytes, datetime, list, dict, set) -- no arbitrary Python objects can
    be instantiated, which is what makes this constructor "safe".
    """

    def construct_scalar(self, node):
        # Special case: a mapping may stand in for a scalar when one of its
        # keys carries the 'tag:yaml.org,2002:value' tag; the value paired
        # with that key is then taken as the scalar.
        if isinstance(node, MappingNode):
            for key_node, value_node in node.value:
                if key_node.tag == 'tag:yaml.org,2002:value':
                    return self.construct_scalar(value_node)
        return super().construct_scalar(node)

    def flatten_mapping(self, node):
        """Expand YAML merge keys into *node*'s own key/value list, in place.

        Entries tagged 'tag:yaml.org,2002:merge' are removed and the pairs
        from the merged mapping(s) are prepended, so the node's own keys
        take precedence over merged ones.
        """
        merge = []
        index = 0
        while index < len(node.value):
            key_node, value_node = node.value[index]
            if key_node.tag == 'tag:yaml.org,2002:merge':
                # Remove the merge entry; index is NOT advanced because the
                # next pair has shifted into this slot.
                del node.value[index]
                if isinstance(value_node, MappingNode):
                    # Merge a single mapping (recursively flattened first).
                    self.flatten_mapping(value_node)
                    merge.extend(value_node.value)
                elif isinstance(value_node, SequenceNode):
                    # Merge a sequence of mappings; earlier mappings in the
                    # sequence win, hence the reverse before extending.
                    submerge = []
                    for subnode in value_node.value:
                        if not isinstance(subnode, MappingNode):
                            raise ConstructorError("while constructing a mapping",
                                    node.start_mark,
                                    "expected a mapping for merging, but found %s"
                                    % subnode.id, subnode.start_mark)
                        self.flatten_mapping(subnode)
                        submerge.append(subnode.value)
                    submerge.reverse()
                    for value in submerge:
                        merge.extend(value)
                else:
                    raise ConstructorError("while constructing a mapping", node.start_mark,
                            "expected a mapping or list of mappings for merging, but found %s"
                            % value_node.id, value_node.start_mark)
            elif key_node.tag == 'tag:yaml.org,2002:value':
                # The '=' value key acts as a plain string key here.
                key_node.tag = 'tag:yaml.org,2002:str'
                index += 1
            else:
                index += 1
        if merge:
            # Merged pairs go first so the node's own pairs override them.
            node.value = merge + node.value

    def construct_mapping(self, node, deep=False):
        # Resolve merge keys before delegating to the base implementation.
        if isinstance(node, MappingNode):
            self.flatten_mapping(node)
        return super().construct_mapping(node, deep=deep)

    def construct_yaml_null(self, node):
        # The scalar is constructed only for its side effects/validation;
        # a null tag always yields None.
        self.construct_scalar(node)
        return None

    # Recognized YAML 1.1 boolean spellings (lowercased before lookup).
    bool_values = {
        'yes':      True,
        'no':       False,
        'true':     True,
        'false':    False,
        'on':       True,
        'off':      False,
    }

    def construct_yaml_bool(self, node):
        """Construct a bool from any YAML 1.1 boolean spelling."""
        value = self.construct_scalar(node)
        return self.bool_values[value.lower()]

    def construct_yaml_int(self, node):
        """Construct an int from YAML 1.1 integer syntax.

        Supports optional sign, '_' digit separators, binary ('0b'),
        hexadecimal ('0x'), octal (leading '0'), sexagesimal
        (colon-separated base-60 segments) and decimal forms.
        """
        value = self.construct_scalar(node)
        value = value.replace('_', '')
        sign = +1
        if value[0] == '-':
            sign = -1
        if value[0] in '+-':
            value = value[1:]
        if value == '0':
            return 0
        elif value.startswith('0b'):
            return sign*int(value[2:], 2)
        elif value.startswith('0x'):
            return sign*int(value[2:], 16)
        elif value[0] == '0':
            return sign*int(value, 8)
        elif ':' in value:
            # Sexagesimal: e.g. '1:30' == 90.
            digits = [int(part) for part in value.split(':')]
            digits.reverse()
            base = 1
            value = 0
            for digit in digits:
                value += digit*base
                base *= 60
            return sign*value
        else:
            return sign*int(value)

    # Compute +inf portably by repeated squaring until it stops growing,
    # and derive a quiet NaN from it (like C99), since float('inf') was
    # historically not portable.
    inf_value = 1e300
    while inf_value != inf_value*inf_value:
        inf_value *= inf_value
    nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).

    def construct_yaml_float(self, node):
        """Construct a float from YAML 1.1 float syntax.

        Supports optional sign, '_' separators, '.inf'/'.nan' specials and
        sexagesimal (colon-separated base-60) notation.
        """
        value = self.construct_scalar(node)
        value = value.replace('_', '').lower()
        sign = +1
        if value[0] == '-':
            sign = -1
        if value[0] in '+-':
            value = value[1:]
        if value == '.inf':
            return sign*self.inf_value
        elif value == '.nan':
            return self.nan_value
        elif ':' in value:
            # Sexagesimal float, analogous to construct_yaml_int.
            digits = [float(part) for part in value.split(':')]
            digits.reverse()
            base = 1
            value = 0.0
            for digit in digits:
                value += digit*base
                base *= 60
            return sign*value
        else:
            return sign*float(value)

    def construct_yaml_binary(self, node):
        """Construct bytes from a base64-encoded '!!binary' scalar."""
        try:
            value = self.construct_scalar(node).encode('ascii')
        except UnicodeEncodeError as exc:
            raise ConstructorError(None, None,
                    "failed to convert base64 data into ascii: %s" % exc,
                    node.start_mark)
        try:
            # decodebytes is the modern name; decodestring is the legacy
            # fallback (removed in Python 3.9).
            if hasattr(base64, 'decodebytes'):
                return base64.decodebytes(value)
            else:
                return base64.decodestring(value)
        except binascii.Error as exc:
            raise ConstructorError(None, None,
                    "failed to decode base64 data: %s" % exc, node.start_mark)

    # ISO 8601-style timestamp as allowed by the YAML 1.1 timestamp type:
    # date, optional time, optional fraction, optional timezone offset.
    timestamp_regexp = re.compile(
            r'''^(?P<year>[0-9][0-9][0-9][0-9])
                -(?P<month>[0-9][0-9]?)
                -(?P<day>[0-9][0-9]?)
                (?:(?:[Tt]|[ \t]+)
                (?P<hour>[0-9][0-9]?)
                :(?P<minute>[0-9][0-9])
                :(?P<second>[0-9][0-9])
                (?:\.(?P<fraction>[0-9]*))?
                (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
                (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)

    def construct_yaml_timestamp(self, node):
        """Construct a datetime.date or datetime.datetime from a timestamp.

        Timezone offsets are applied by subtracting the delta, i.e. the
        result is normalized to UTC (returned as a naive datetime).
        """
        # NOTE(review): the construct_scalar result is unused; the regexp is
        # matched against node.value directly.
        value = self.construct_scalar(node)
        match = self.timestamp_regexp.match(node.value)
        values = match.groupdict()
        year = int(values['year'])
        month = int(values['month'])
        day = int(values['day'])
        if not values['hour']:
            # Date only: no time component present.
            return datetime.date(year, month, day)
        hour = int(values['hour'])
        minute = int(values['minute'])
        second = int(values['second'])
        fraction = 0
        if values['fraction']:
            # Pad/truncate the fraction to microseconds (6 digits).
            fraction = values['fraction'][:6]
            while len(fraction) < 6:
                fraction += '0'
            fraction = int(fraction)
        delta = None
        if values['tz_sign']:
            tz_hour = int(values['tz_hour'])
            tz_minute = int(values['tz_minute'] or 0)
            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
            if values['tz_sign'] == '-':
                delta = -delta
        data = datetime.datetime(year, month, day, hour, minute, second, fraction)
        if delta:
            data -= delta
        return data

    def construct_yaml_omap(self, node):
        # Note: we do not check for duplicate keys, because it's too
        # CPU-expensive.
        # Two-step generator construction: the empty container is yielded
        # first, presumably so aliases/anchors referring to this node can be
        # resolved before it is populated -- TODO confirm against
        # BaseConstructor.construct_object.
        omap = []
        yield omap
        if not isinstance(node, SequenceNode):
            raise ConstructorError("while constructing an ordered map", node.start_mark,
                    "expected a sequence, but found %s" % node.id, node.start_mark)
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError("while constructing an ordered map", node.start_mark,
                        "expected a mapping of length 1, but found %s" % subnode.id,
                        subnode.start_mark)
            if len(subnode.value) != 1:
                raise ConstructorError("while constructing an ordered map", node.start_mark,
                        "expected a single mapping item, but found %d items" % len(subnode.value),
                        subnode.start_mark)
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            value = self.construct_object(value_node)
            omap.append((key, value))

    def construct_yaml_pairs(self, node):
        # Note: the same code as `construct_yaml_omap`.
        pairs = []
        yield pairs
        if not isinstance(node, SequenceNode):
            raise ConstructorError("while constructing pairs", node.start_mark,
                    "expected a sequence, but found %s" % node.id, node.start_mark)
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError("while constructing pairs", node.start_mark,
                        "expected a mapping of length 1, but found %s" % subnode.id,
                        subnode.start_mark)
            if len(subnode.value) != 1:
                raise ConstructorError("while constructing pairs", node.start_mark,
                        "expected a single mapping item, but found %d items" % len(subnode.value),
                        subnode.start_mark)
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            value = self.construct_object(value_node)
            pairs.append((key, value))

    def construct_yaml_set(self, node):
        # A YAML set is represented as a mapping with null values; only the
        # keys are kept.
        data = set()
        yield data
        value = self.construct_mapping(node)
        data.update(value)

    def construct_yaml_str(self, node):
        """Construct a str from a '!!str' scalar."""
        return self.construct_scalar(node)

    def construct_yaml_seq(self, node):
        # Two-step construction (see construct_yaml_omap).
        data = []
        yield data
        data.extend(self.construct_sequence(node))

    def construct_yaml_map(self, node):
        # Two-step construction (see construct_yaml_omap).
        data = {}
        yield data
        value = self.construct_mapping(node)
        data.update(value)

    def construct_yaml_object(self, node, cls):
        """Construct an instance of *cls* and restore its state from *node*.

        The instance is created without calling __init__; state is restored
        via __setstate__ when available, otherwise via __dict__.update.
        """
        data = cls.__new__(cls)
        yield data
        if hasattr(data, '__setstate__'):
            # deep=True: __setstate__ needs fully constructed values.
            state = self.construct_mapping(node, deep=True)
            data.__setstate__(state)
        else:
            state = self.construct_mapping(node)
            data.__dict__.update(state)

    def construct_undefined(self, node):
        """Fallback for tags with no registered constructor: always raises."""
        raise ConstructorError(None, None,
                "could not determine a constructor for the tag %r" % node.tag,
                node.start_mark)
|
||||
|
||||
# Register the constructors for the standard YAML 1.1 tags on
# SafeConstructor.  Dict iteration preserves insertion order, so the
# registration order is identical to the original one-call-per-tag form.
for _tag_suffix, _tag_constructor in {
    'null':      SafeConstructor.construct_yaml_null,
    'bool':      SafeConstructor.construct_yaml_bool,
    'int':       SafeConstructor.construct_yaml_int,
    'float':     SafeConstructor.construct_yaml_float,
    'binary':    SafeConstructor.construct_yaml_binary,
    'timestamp': SafeConstructor.construct_yaml_timestamp,
    'omap':      SafeConstructor.construct_yaml_omap,
    'pairs':     SafeConstructor.construct_yaml_pairs,
    'set':       SafeConstructor.construct_yaml_set,
    'str':       SafeConstructor.construct_yaml_str,
    'seq':       SafeConstructor.construct_yaml_seq,
    'map':       SafeConstructor.construct_yaml_map,
}.items():
    SafeConstructor.add_constructor(
        'tag:yaml.org,2002:' + _tag_suffix, _tag_constructor)
del _tag_suffix, _tag_constructor

# The None key is the catch-all: any tag without an explicit constructor
# is routed to construct_undefined, which raises ConstructorError.
SafeConstructor.add_constructor(None,
        SafeConstructor.construct_undefined)
|
||||
|
||||
class Constructor(SafeConstructor):
    """Constructor that additionally resolves the ``!!python/...`` tags.

    WARNING: these tags can import arbitrary modules and instantiate
    arbitrary Python objects, so this constructor must never be fed
    untrusted input -- use SafeConstructor for that.
    """

    def construct_python_str(self, node):
        """Construct a str from a ``!!python/str`` scalar."""
        return self.construct_scalar(node)

    def construct_python_unicode(self, node):
        """Construct a str from a ``!!python/unicode`` scalar (Python 2 legacy tag)."""
        return self.construct_scalar(node)

    def construct_python_bytes(self, node):
        """Construct bytes from a base64-encoded ``!!python/bytes`` scalar.

        Mirrors SafeConstructor.construct_yaml_binary, including its error
        reporting via ConstructorError.
        """
        try:
            value = self.construct_scalar(node).encode('ascii')
        except UnicodeEncodeError as exc:
            raise ConstructorError(None, None,
                    "failed to convert base64 data into ascii: %s" % exc,
                    node.start_mark)
        try:
            # decodebytes is the modern name; decodestring is the legacy
            # fallback (removed in Python 3.9).
            if hasattr(base64, 'decodebytes'):
                return base64.decodebytes(value)
            else:
                return base64.decodestring(value)
        except binascii.Error as exc:
            raise ConstructorError(None, None,
                    "failed to decode base64 data: %s" % exc, node.start_mark)

    def construct_python_long(self, node):
        """``!!python/long`` is an alias of int on Python 3."""
        return self.construct_yaml_int(node)

    def construct_python_complex(self, node):
        """Construct a complex number from a ``!!python/complex`` scalar."""
        return complex(self.construct_scalar(node))

    def construct_python_tuple(self, node):
        """Construct a tuple from a ``!!python/tuple`` sequence."""
        return tuple(self.construct_sequence(node))

    def find_python_module(self, name, mark):
        """Import and return module *name*; raise ConstructorError on failure."""
        if not name:
            raise ConstructorError("while constructing a Python module", mark,
                    "expected non-empty name appended to the tag", mark)
        try:
            __import__(name)
        except ImportError as exc:
            raise ConstructorError("while constructing a Python module", mark,
                    "cannot find module %r (%s)" % (name, exc), mark)
        return sys.modules[name]

    def find_python_name(self, name, mark):
        """Resolve a dotted *name* to an attribute of its module.

        An undotted name is looked up in ``builtins``.  Raises
        ConstructorError if the module cannot be imported or lacks the
        attribute.
        """
        if not name:
            raise ConstructorError("while constructing a Python object", mark,
                    "expected non-empty name appended to the tag", mark)
        if '.' in name:
            module_name, object_name = name.rsplit('.', 1)
        else:
            module_name = 'builtins'
            object_name = name
        try:
            __import__(module_name)
        except ImportError as exc:
            raise ConstructorError("while constructing a Python object", mark,
                    "cannot find module %r (%s)" % (module_name, exc), mark)
        module = sys.modules[module_name]
        if not hasattr(module, object_name):
            raise ConstructorError("while constructing a Python object", mark,
                    "cannot find %r in the module %r"
                    % (object_name, module.__name__), mark)
        return getattr(module, object_name)

    def construct_python_name(self, suffix, node):
        """``!!python/name:pkg.obj`` -- scalar must be empty; returns the object."""
        value = self.construct_scalar(node)
        if value:
            raise ConstructorError("while constructing a Python name", node.start_mark,
                    "expected the empty value, but found %r" % value, node.start_mark)
        return self.find_python_name(suffix, node.start_mark)

    def construct_python_module(self, suffix, node):
        """``!!python/module:name`` -- scalar must be empty; returns the module."""
        value = self.construct_scalar(node)
        if value:
            raise ConstructorError("while constructing a Python module", node.start_mark,
                    "expected the empty value, but found %r" % value, node.start_mark)
        return self.find_python_module(suffix, node.start_mark)

    def make_python_instance(self, suffix, node,
            args=None, kwds=None, newobj=False):
        """Create an instance of the class named by *suffix*.

        With newobj=True (the ``object/new`` form) the instance is created
        via cls.__new__ without running __init__; otherwise the class is
        called normally.
        """
        if not args:
            args = []
        if not kwds:
            kwds = {}
        cls = self.find_python_name(suffix, node.start_mark)
        if newobj and isinstance(cls, type):
            return cls.__new__(cls, *args, **kwds)
        else:
            return cls(*args, **kwds)

    def set_python_instance_state(self, instance, state):
        """Restore pickle-style *state* onto *instance*.

        Uses __setstate__ when available; otherwise handles the
        ``(state, slotstate)`` tuple convention, updating __dict__ and
        setting slot attributes individually.
        """
        if hasattr(instance, '__setstate__'):
            instance.__setstate__(state)
        else:
            slotstate = {}
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            if hasattr(instance, '__dict__'):
                instance.__dict__.update(state)
            elif state:
                # No __dict__ (slots-only class): treat the plain state as
                # slot state.
                slotstate.update(state)
            for key, value in slotstate.items():
                # BUGFIX: was `setattr(object, key, value)`, which raised
                # TypeError (attributes of the builtin `object` type cannot
                # be set) and never restored __slots__ attributes; fixed
                # upstream in PyYAML to target the instance.
                setattr(instance, key, value)

    def construct_python_object(self, suffix, node):
        # Format:
        #   !!python/object:module.name { ... state ... }
        # Two-step generator construction: yield the bare instance first so
        # aliases to this node can be resolved before state is applied.
        instance = self.make_python_instance(suffix, node, newobj=True)
        yield instance
        # __setstate__ needs fully constructed values, hence deep=True.
        deep = hasattr(instance, '__setstate__')
        state = self.construct_mapping(node, deep=deep)
        self.set_python_instance_state(instance, state)

    def construct_python_object_apply(self, suffix, node, newobj=False):
        # Format:
        #   !!python/object/apply       # (or !!python/object/new)
        #   args: [ ... arguments ... ]
        #   kwds: { ... keywords ... }
        #   state: ... state ...
        #   listitems: [ ... listitems ... ]
        #   dictitems: { ... dictitems ... }
        # or short format:
        #   !!python/object/apply [ ... arguments ... ]
        # The difference between !!python/object/apply and !!python/object/new
        # is how an object is created, check make_python_instance for details.
        if isinstance(node, SequenceNode):
            # Short form: the sequence holds positional arguments only.
            args = self.construct_sequence(node, deep=True)
            kwds = {}
            state = {}
            listitems = []
            dictitems = {}
        else:
            value = self.construct_mapping(node, deep=True)
            args = value.get('args', [])
            kwds = value.get('kwds', {})
            state = value.get('state', {})
            listitems = value.get('listitems', [])
            dictitems = value.get('dictitems', {})
        instance = self.make_python_instance(suffix, node, args, kwds, newobj)
        if state:
            self.set_python_instance_state(instance, state)
        if listitems:
            instance.extend(listitems)
        if dictitems:
            for key in dictitems:
                instance[key] = dictitems[key]
        return instance

    def construct_python_object_new(self, suffix, node):
        """``!!python/object/new`` -- like apply but created via __new__."""
        return self.construct_python_object_apply(suffix, node, newobj=True)
|
||||
|
||||
# Register the Python-specific tags on Constructor.  Dict iteration
# preserves insertion order, so registration order matches the original
# one-call-per-tag form exactly.
_PY_TAG = 'tag:yaml.org,2002:python/'

for _suffix, _constructor in {
    'none':    Constructor.construct_yaml_null,
    'bool':    Constructor.construct_yaml_bool,
    'str':     Constructor.construct_python_str,
    'unicode': Constructor.construct_python_unicode,
    'bytes':   Constructor.construct_python_bytes,
    'int':     Constructor.construct_yaml_int,
    'long':    Constructor.construct_python_long,
    'float':   Constructor.construct_yaml_float,
    'complex': Constructor.construct_python_complex,
    'list':    Constructor.construct_yaml_seq,
    'tuple':   Constructor.construct_python_tuple,
    'dict':    Constructor.construct_yaml_map,
}.items():
    Constructor.add_constructor(_PY_TAG + _suffix, _constructor)

# Multi-constructors match by tag prefix and receive the remainder of the
# tag (e.g. the dotted class name) as their first argument.
for _prefix, _multi in {
    'name:':         Constructor.construct_python_name,
    'module:':       Constructor.construct_python_module,
    'object:':       Constructor.construct_python_object,
    'object/apply:': Constructor.construct_python_object_apply,
    'object/new:':   Constructor.construct_python_object_new,
}.items():
    Constructor.add_multi_constructor(_PY_TAG + _prefix, _multi)

del _PY_TAG, _suffix, _constructor, _prefix, _multi
|
||||
|
||||
85
software/tools/pymcuprog/libs/yaml/cyaml.py
Normal file
85
software/tools/pymcuprog/libs/yaml/cyaml.py
Normal file
@@ -0,0 +1,85 @@
|
||||
|
||||
__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
|
||||
'CBaseDumper', 'CSafeDumper', 'CDumper']
|
||||
|
||||
from _yaml import CParser, CEmitter
|
||||
|
||||
from .constructor import *
|
||||
|
||||
from .serializer import *
|
||||
from .representer import *
|
||||
|
||||
from .resolver import *
|
||||
|
||||
class CBaseLoader(CParser, BaseConstructor, BaseResolver):
    """Loader backed by the LibYAML C parser, with the *base* constructor
    and resolver (no implicit tag resolution beyond the defaults)."""

    def __init__(self, stream):
        # Each base is initialized explicitly, in this order; CParser comes
        # from the _yaml C extension module.
        CParser.__init__(self, stream)
        BaseConstructor.__init__(self)
        BaseResolver.__init__(self)
|
||||
|
||||
class CSafeLoader(CParser, SafeConstructor, Resolver):
    """Loader backed by the LibYAML C parser that constructs only plain
    Python objects (SafeConstructor) -- suitable for untrusted input."""

    def __init__(self, stream):
        # Explicit base initialization; CParser is a C extension type.
        CParser.__init__(self, stream)
        SafeConstructor.__init__(self)
        Resolver.__init__(self)
|
||||
|
||||
class CLoader(CParser, Constructor, Resolver):
    """Loader backed by the LibYAML C parser with the full Constructor.

    Resolves !!python/... tags, so it can instantiate arbitrary Python
    objects -- never use on untrusted input.
    """

    def __init__(self, stream):
        # Explicit base initialization; CParser is a C extension type.
        CParser.__init__(self, stream)
        Constructor.__init__(self)
        Resolver.__init__(self)
|
||||
|
||||
class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
    """Dumper backed by the LibYAML C emitter with the base representer
    and resolver.

    Unlike the pure-Python dumpers there is no Serializer base: the C
    emitter receives the serializer-level options (encoding, explicit
    start/end, version, tags) directly.
    """

    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        # NOTE(review): initializes Representer/Resolver rather than the
        # BaseRepresenter/BaseResolver bases listed above; presumably those
        # subclasses add no __init__ of their own so the calls resolve to
        # the same initializers -- confirm against representer/resolver.
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
|
||||
|
||||
class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
    """Dumper backed by the LibYAML C emitter that represents only plain
    Python objects (SafeRepresenter)."""

    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # The C emitter takes the serializer-level options directly; there
        # is no Serializer base in the C-backed dumpers.
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        SafeRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
|
||||
|
||||
class CDumper(CEmitter, Serializer, Representer, Resolver):
    """Dumper backed by the LibYAML C emitter with the full Representer
    (can represent arbitrary Python objects via !!python/... tags)."""

    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # The C emitter receives the serializer-level options directly;
        # note that Serializer.__init__ is not called here even though
        # Serializer is a base class.
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
|
||||
|
||||
62
software/tools/pymcuprog/libs/yaml/dumper.py
Normal file
62
software/tools/pymcuprog/libs/yaml/dumper.py
Normal file
@@ -0,0 +1,62 @@
|
||||
|
||||
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
|
||||
|
||||
from .emitter import *
|
||||
from .serializer import *
|
||||
from .representer import *
|
||||
from .resolver import *
|
||||
|
||||
class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
    """Pure-Python dumper combining emitter, serializer, and the base
    representer/resolver."""

    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # Each cooperating base is initialized explicitly with the subset
        # of keyword options it understands.
        Emitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width,
                allow_unicode=allow_unicode, line_break=line_break)
        Serializer.__init__(self, encoding=encoding,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        # NOTE(review): initializes Representer/Resolver rather than the
        # BaseRepresenter/BaseResolver bases listed above; presumably those
        # subclasses add no __init__ of their own so the calls resolve to
        # the same initializers -- confirm against representer/resolver.
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
|
||||
|
||||
class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
    """Pure-Python dumper that represents only plain Python objects
    (SafeRepresenter)."""

    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # Explicit per-base initialization, each with its own options.
        Emitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width,
                allow_unicode=allow_unicode, line_break=line_break)
        Serializer.__init__(self, encoding=encoding,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        SafeRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
|
||||
|
||||
class Dumper(Emitter, Serializer, Representer, Resolver):
    """Pure-Python dumper with the full Representer (can emit
    !!python/... tags for arbitrary Python objects)."""

    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # Explicit per-base initialization, each with its own options.
        Emitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width,
                allow_unicode=allow_unicode, line_break=line_break)
        Serializer.__init__(self, encoding=encoding,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user