
Commit 7cdd9e5

scripts: cogeno: edts: add extended DTS database module
The Extended Device Tree Specification (EDTS) database collates device tree (DTS) information with information taken from the device tree bindings. The EDTS database may be loaded from a JSON file, stored to a JSON file, or extracted from the DTS files and the bindings YAML files. The database is integrated into cogeno as a module.

The commit integrates database development done in zephyrproject-rtos#9876, which was based on zephyrproject-rtos#6762. Major differences/improvements over zephyrproject-rtos#9876 are:

- The database now has its own extraction function that can be used instead of e.g. extract_dts_includes. The extraction function follows the design of the extract_dts_includes script and the additions made in zephyrproject-rtos#9876. It is restructured, and several globals are now classes and objects. All functionality of extract_dts_includes related to the generation of defines is not part of the database extract function; its sole purpose is to fill the database directly from the compiled DTS file.
- The database got its own directory, 'edtsdb', to structure all files related to the database.
- The EDTSDevice class from zephyrproject-rtos#9876 was enhanced to allow devices to access the database they are taken from.

Major compatibility notes with respect to zephyrproject-rtos#9876:

- The consumer and provider APIs and the internal structure of the database are copied from zephyrproject-rtos#9876. The API should be fully compatible.
- Extraction of children was replaced, as it broke the concept of the devices struct as a list of devices. The functions device.get_children() and device.get_parent() may be used to access the parent<->child relation.
- The EDTSDevice class is copied from zephyrproject-rtos#9876. The device API should be compatible except for:
  - the constructor, which now needs the EDTS database, and
  - the unique id feature; to get a unique id, the device.get_name() function can be used instead.

Signed-off-by lines from zephyrproject-rtos#9876 are added to attribute the changes done there and copied here.

Signed-off-by: Erwan Gouriou <[email protected]>
Signed-off-by: Kumar Gala <[email protected]>
Signed-off-by: Bobby Noelte <[email protected]>
1 parent d669fe0 commit 7cdd9e5
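
A minimal consumer-side sketch of the API described above, assuming the module layout added by this commit. extract(), save(), and load() appear in the diff below; the device lookup helper is a placeholder assumption for whatever accessor the consumer API provides, and all paths are hypothetical:

    from edtsdatabase import EDTSDatabase

    edts = EDTSDatabase()
    # Fill the database directly from a compiled DTS file and bindings dirs
    # (hypothetical paths).
    edts.extract('build/zephyr.dts.pre.tmp', ['dts/bindings'])
    edts.save('build/edts.json')  # persist as JSON; edts.load() restores it

    # Hypothetical lookup; the accessor name is an assumption, not part of the diff.
    device = edts.get_device_by_device_id('/soc/uart@40002000')
    print(device.get_name())             # unique name, replaces the unique-id feature
    for child in device.get_children():  # parent<->child relation
        print(child.get_name(), '->', child.get_parent().get_name())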

38 files changed: +4741 −0 lines changed
@@ -0,0 +1,7 @@
#
# Copyright (c) 2017 Bobby Noelte
#
# SPDX-License-Identifier: Apache-2.0
#

# Empty to allow all modules to be imported
@@ -0,0 +1,104 @@
#!/usr/bin/env python3
#
# Copyright (c) 2018 Bobby Noelte
# Copyright (c) 2018 Linaro Limited
#
# SPDX-License-Identifier: Apache-2.0
#

import sys
import argparse
from pathlib import Path
from pprint import pprint

##
# Make relative import work also with __main__
if __package__ is None or __package__ == '':
    # use current directory visibility
    from edtsdb.database import EDTSDb
else:
    # use current package visibility
    from .edtsdb.database import EDTSDb


##
# @brief Extended DTS database
#
class EDTSDatabase(EDTSDb):

    @staticmethod
    def is_valid_directory(parser, arg):
        try:
            path = Path(arg).resolve()
        except:
            path = Path(arg)
        if not path.is_dir():
            parser.error('The directory {} does not exist!'.format(path))
        else:
            # Directory exists so return the directory
            return str(path)

    @staticmethod
    def is_valid_file(parser, arg):
        try:
            path = Path(arg).resolve()
        except:
            path = Path(arg)
        if not path.is_file():
            parser.error('The file {} does not exist!'.format(path))
        else:
            # File exists so return the file
            return str(path)

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)

    def callable_main(self, args):
        self._parser = argparse.ArgumentParser(
            description='Extended Device Tree Specification Database.')
        self._parser.add_argument('-l', '--load', nargs=1, metavar='FILE',
            dest='load_file', action='store',
            type=lambda x: EDTSDatabase.is_valid_file(self._parser, x),
            help='Load the input from FILE.')
        self._parser.add_argument('-s', '--save', nargs=1, metavar='FILE',
            dest='save_file', action='store',
            type=lambda x: EDTSDatabase.is_valid_file(self._parser, x),
            help='Save the database to JSON FILE.')
        self._parser.add_argument('-i', '--export-header', nargs=1, metavar='FILE',
            dest='export_header', action='store',
            type=lambda x: EDTSDatabase.is_valid_file(self._parser, x),
            help='Export the database to header FILE.')
        self._parser.add_argument('-e', '--extract', nargs=1, metavar='FILE',
            dest='extract_file', action='store',
            type=lambda x: EDTSDatabase.is_valid_file(self._parser, x),
            help='Extract the database from dts FILE.')
        self._parser.add_argument('-b', '--bindings', nargs='+', metavar='DIR',
            dest='bindings_dirs', action='store',
            type=lambda x: EDTSDatabase.is_valid_directory(self._parser, x),
            help='Use bindings from bindings DIR for extraction.' +
                 ' We allow multiple.')
        self._parser.add_argument('-p', '--print',
            dest='print_it', action='store_true',
            help='Print EDTS database content.')

        args = self._parser.parse_args(args)

        if args.load_file is not None:
            self.load(args.load_file[0])
        if args.extract_file is not None:
            self.extract(args.extract_file[0], args.bindings_dirs)
        if args.save_file is not None:
            self.save(args.save_file[0])
        if args.export_header is not None:
            self.export_header(args.export_header[0])
        if args.print_it:
            pprint(self._edts)

        return 0

def main():
    EDTSDatabase().callable_main(sys.argv[1:])

if __name__ == '__main__':
    main()
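
The argparse wiring above doubles as a small command-line front end. A usage sketch driving callable_main() directly, with hypothetical input and output paths:

    from edtsdatabase import EDTSDatabase

    # Equivalent to: edtsdatabase.py -e <dts> -b <dir> -s <json> -p
    rc = EDTSDatabase().callable_main([
        '--extract', 'build/zephyr.dts.pre.tmp',  # compiled DTS (hypothetical path)
        '--bindings', 'dts/bindings',             # one or more bindings dirs
        '--save', 'build/edts.json',              # write the database as JSON
        '--print',                                # pprint the database content
    ])

Note one quirk of the code as written: --save and --export-header validate their argument with is_valid_file, so the target file must already exist before the database can be written to it.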
@@ -0,0 +1,7 @@
#
# Copyright (c) 2017 Bobby Noelte
#
# SPDX-License-Identifier: Apache-2.0
#

# Empty to allow all modules to be imported
@@ -0,0 +1,208 @@
#!/usr/bin/env python3
#
# Copyright (c) 2017, Linaro Limited
# Copyright (c) 2018, Bobby Noelte
#
# SPDX-License-Identifier: Apache-2.0
#

import os, fnmatch
import re
import yaml
from collections.abc import Mapping

from pathlib import Path

class Binder(yaml.Loader):

    ##
    # List of all yaml files available for yaml loaders
    # of this class. Must be preset before the first
    # load operation.
    _files = []

    ##
    # Files that are already included.
    # Must be reset on the load of every new binding.
    _included = []

    @classmethod
    def _merge_binding_dicts(cls, parent, fname, dct, merge_dct):
        # from https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
        """ Recursive dict merge. Inspired by :meth:``dict.update()``; instead of
        updating only top-level keys, _merge_binding_dicts recurses down into
        dicts nested to an arbitrary depth, updating keys. The ``merge_dct`` is
        merged into ``dct``.
        :param parent: parent tuple key
        :param fname: yaml file being processed
        :param dct: dict onto which the merge is executed
        :param merge_dct: dict merged into dct
        :return: None
        """
        for k, v in merge_dct.items():
            if (k in dct and isinstance(dct[k], dict)
                    and isinstance(merge_dct[k], Mapping)):
                Binder._merge_binding_dicts(k, fname, dct[k], merge_dct[k])
            else:
                if k in dct and dct[k] != merge_dct[k]:
                    # type, title, description, version of inherited node
                    # are overwritten by intention.
                    if k == 'type':
                        # collect types
                        if not isinstance(dct['type'], list):
                            dct['type'] = [dct['type'],]
                        if isinstance(merge_dct['type'], list):
                            dct['type'].extend(merge_dct['type'])
                        else:
                            dct['type'].append(merge_dct['type'])
                        continue
                    elif k in ("title", "version", "description"):
                        # do not warn
                        pass
                    elif (k == "category") and (merge_dct[k] == "required") \
                            and (dct[k] == "optional"):
                        # do not warn
                        pass
                    else:
                        print("binder.py: {}('{}') merge of property '{}': "
                              "'{}' overwrites '{}'."
                              .format(fname, parent, k, merge_dct[k], dct[k]))
                dct[k] = merge_dct[k]

    @classmethod
    def _traverse_inherited(cls, fname, node):
        """ Recursive overload procedure inside ``node``.
        The ``inherits`` section is searched for and used as node base when
        found. Base values are then overloaded by node values
        and some consistency checks are done.
        :param fname: initial yaml file being processed
        :param node:
        :return: node
        """

        # do some consistency checks. Especially id is needed for further
        # processing. title must be first to check.
        if 'title' not in node:
            # If 'title' is missing, make fault finding easier.
            # Give a hint what node we are looking at.
            print("binder.py: {} node without 'title' -".format(fname), node)
        for prop in ('title', 'version', 'description'):
            if prop not in node:
                node[prop] = "<unknown {}>".format(prop)
                print("binder.py: {} WARNING:".format(fname),
                      "'{}' property missing in".format(prop),
                      "'{}' binding. Using '{}'.".format(node['title'],
                                                         node[prop]))

        # warn if we have an 'id' field
        if 'id' in node:
            print("binder.py: {} WARNING: id field set".format(fname),
                  "in '{}', should be removed.".format(node['title']))

        if 'inherits' in node:
            if isinstance(node['inherits'], list):
                inherits_list = node['inherits']
            else:
                inherits_list = [node['inherits'],]
            node.pop('inherits')
            for inherits in inherits_list:
                if 'inherits' in inherits:
                    inherits = cls._traverse_inherited(fname, inherits)
                cls._merge_binding_dicts(None, fname, inherits, node)
                node = inherits
        return node

    ##
    # @brief Get bindings for given compatibles.
    #
    # @param compatibles
    # @param bindings_paths directories to search for binding files
    # @return dictionary of bindings found
    @classmethod
    def bindings(cls, compatibles, bindings_paths):
        # find unique set of compatibles across all active nodes
        s = set()
        for k, v in compatibles.items():
            if isinstance(v, list):
                for item in v:
                    s.add(item)
            else:
                s.add(v)

        # scan YAML files and find the ones we are interested in
        # We add our own bindings directory first (highest priority)
        # We only allow one binding file with the same name
        bindings_paths.insert(0, Path(Path(__file__).resolve().parent,
                                      'bindings'))
        cls._files = []
        binding_files = []
        for path in bindings_paths:
            for root, dirnames, filenames in os.walk(str(path)):
                for filename in fnmatch.filter(filenames, '*.yaml'):
                    if not filename in binding_files:
                        binding_files.append(filename)
                        cls._files.append(os.path.join(root, filename))

        bindings_list = {}
        file_load_list = set()
        for file in cls._files:
            for line in open(file, 'r', encoding='utf-8'):
                if re.search(r'^\s+constraint:*', line):
                    c = line.split(':')[1].strip()
                    c = c.strip('"')
                    if c in s:
                        if file not in file_load_list:
                            file_load_list.add(file)
                            with open(file, 'r', encoding='utf-8') as yf:
                                cls._included = []
                                # collapse the bindings inherited information
                                # before return
                                bindings_list[c] = cls._traverse_inherited(
                                    file, yaml.load(yf, cls))

        return bindings_list

    def __init__(self, stream):
        filepath = os.path.realpath(stream.name)
        if filepath in self._included:
            print("Error: circular inclusion for file name '{}'".
                  format(stream.name))
            raise yaml.constructor.ConstructorError
        self._included.append(filepath)
        super(Binder, self).__init__(stream)
        Binder.add_constructor('!include', Binder._include)
        Binder.add_constructor('!import', Binder._include)

    def _include(self, node):
        if isinstance(node, yaml.ScalarNode):
            return self._extract_file(self.construct_scalar(node))

        elif isinstance(node, yaml.SequenceNode):
            result = []
            for filename in self.construct_sequence(node):
                result.append(self._extract_file(filename))
            return result

        elif isinstance(node, yaml.MappingNode):
            result = {}
            for k, v in self.construct_mapping(node).items():
                result[k] = self._extract_file(v)
            return result

        else:
            print("Error: unrecognised node type in !include statement")
            raise yaml.constructor.ConstructorError

    def _extract_file(self, filename):
        filepaths = [filepath for filepath in self._files
                     if os.path.basename(filepath) == filename]
        if len(filepaths) == 0:
            print("Error: unknown file name '{}' in !include statement".
                  format(filename))
            raise yaml.constructor.ConstructorError
        elif len(filepaths) > 1:
            # multiple candidates for filename
            print("Warning: multiple candidates for file name "
                  "'{}' in !include statement - using first of".
                  format(filename), filepaths)
        with open(filepaths[0], 'r', encoding='utf-8') as f:
            return yaml.load(f, Binder)
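
The effect of _merge_binding_dicts() on an inherited binding can be seen with two hand-written dicts instead of real YAML files. A sketch, assuming binder.py is importable as a module; the binding names and property values are made up:

    from binder import Binder

    base = {'title': 'uart-base',
            'properties': {'reg': {'category': 'optional'}}}
    node = {'title': 'ns16550',
            'properties': {'reg': {'category': 'required'}}}

    # Merge the derived node over its inherited base, as bindings() does.
    Binder._merge_binding_dicts(None, 'example.yaml', base, node)

    assert base['title'] == 'ns16550'                           # overwritten by intention
    assert base['properties']['reg']['category'] == 'required'  # required wins silently

Both overwrites stay quiet: title/version/description and an optional-to-required category change are treated as intentional, while any other conflicting value is overwritten with a warning printed to stdout.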
