Skip to content

Commit bd05cb5

Browse files
committed
scripts: cogeno: edts: add extended DTS database module
The Extended Device Tree Specification database collates device tree (dts) information with information taken from the device tree bindings. The EDTS database may be loaded from a json file, stored to a json file or extracted from the DTS files and the bindings yaml files. The database is integrated into cogeno as a module. The commit integrates database development done in zephyrproject-rtos#9876 which was based on zephyrproject-rtos#6762. Major differences/improvements compared to zephyrproject-rtos#9876 are: - the database now has its own extraction function that can be used instead of e.g. extract_dts_includes. The extraction function follows the design of the extract_dts_includes script and the additions that were done in zephyrproject-rtos#9876. It is restructured and several globals are now classes and objects. All functionality of extract_dts_includes related to the generation of defines is not part of the database extract function. Its sole purpose is to fill the database directly from the compiled DTS file. - the database got its own directory 'edtsdb' to structure all files related to the database. - The EDTSDevice class from zephyrproject-rtos#9876 was enhanced to allow devices to access the database they are taken from. Major compatibility notes with respect to zephyrproject-rtos#9876: - The consumer and provider API and the internal structure of the database are copied from zephyrproject-rtos#9876. - The API should be fully compatible. - Extraction of children was replaced as it broke the concept of the devices struct as a list of devices. The functions device.get_children() and device.get_parent() may be used to access the parent<->child relation. - The EDTSDevice class is copied from zephyrproject-rtos#9876. - The device API should be compatible except for the constructor, which now needs the EDTS database, and the unique id feature; to get a unique id, the device.get_name() function can be used instead.
Sign-offs from zephyrproject-rtos#9876 were added to attribute the changes that were done there and copied here. Signed-off-by: Erwan Gouriou <[email protected]> Signed-off-by: Kumar Gala <[email protected]> Signed-off-by: Bobby Noelte <[email protected]>
1 parent 0dd210c commit bd05cb5

31 files changed

+3247
-0
lines changed
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
#
2+
# Copyright (c) 2017 Bobby Noelte
3+
#
4+
# SPDX-License-Identifier: Apache-2.0
5+
#
6+
7+
# Empty to allow all modules to be imported
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,106 @@
1+
#!/usr/bin/env python3
2+
#
3+
# Copyright (c) 2018 Bobby Noelte
4+
# Copyright (c) 2018 Linaro Limited
5+
#
6+
# SPDX-License-Identifier: Apache-2.0
7+
#
8+
9+
import sys
10+
import argparse
11+
from pathlib import Path
12+
from pprint import pprint
13+
14+
##
15+
# Make relative import work also with __main__
16+
if __package__ is None or __package__ == '':
17+
# use current directory visibility
18+
from edtsdb.database import EDTSDb
19+
from edtsdb.device import EDTSDevice
20+
else:
21+
# use current package visibility
22+
from .edtsdb.database import EDTSDb
23+
from .edtsdb.device import EDTSDevice
24+
25+
26+
##
27+
# @brief Extended DTS database
28+
#
29+
class EDTSDatabase(EDTSDb):
30+
31+
@staticmethod
32+
def is_valid_directory(parser, arg):
33+
try:
34+
path = Path(arg).resolve()
35+
except:
36+
path = Path(arg)
37+
if not path.is_dir():
38+
parser.error('The directory {} does not exist!'.format(path))
39+
else:
40+
# File directory exists so return the directory
41+
return str(path)
42+
43+
@staticmethod
44+
def is_valid_file(parser, arg):
45+
try:
46+
path = Path(arg).resolve()
47+
except:
48+
path = Path(arg)
49+
if not path.is_file():
50+
parser.error('The file {} does not exist!'.format(path))
51+
else:
52+
# File exists so return the file
53+
return str(path)
54+
55+
def __init__(self, *args, **kw):
56+
super().__init__(*args, **kw)
57+
58+
def callable_main(self, args):
59+
self._parser = argparse.ArgumentParser(
60+
description='Extended Device Tree Specification Database.')
61+
self._parser.add_argument('-l', '--load', nargs=1, metavar='FILE',
62+
dest='load_file', action='store',
63+
type=lambda x: EDTSDatabase.is_valid_file(self._parser, x),
64+
help='Load the input from FILE.')
65+
self._parser.add_argument('-s', '--save', nargs=1, metavar='FILE',
66+
dest='save_file', action='store',
67+
type=lambda x: EDTSDatabase.is_valid_file(self._parser, x),
68+
help='Save the database to Json FILE.')
69+
self._parser.add_argument('-i', '--export-header', nargs=1, metavar='FILE',
70+
dest='export_header', action='store',
71+
type=lambda x: EDTSDatabase.is_valid_file(self._parser, x),
72+
help='Export the database to header FILE.')
73+
self._parser.add_argument('-e', '--extract', nargs=1, metavar='FILE',
74+
dest='extract_file', action='store',
75+
type=lambda x: EDTSDatabase.is_valid_file(self._parser, x),
76+
help='Extract the database from dts FILE.')
77+
self._parser.add_argument('-b', '--bindings', nargs='+', metavar='DIR',
78+
dest='bindings_dirs', action='store',
79+
type=lambda x: EDTSDatabase.is_valid_directory(self._parser, x),
80+
help='Use bindings from bindings DIR for extraction.' +
81+
' We allow multiple')
82+
self._parser.add_argument('-p', '--print',
83+
dest='print_it', action='store_true',
84+
help='Print EDTS database content.')
85+
86+
args = self._parser.parse_args(args)
87+
88+
if args.load_file is not None:
89+
self.load(args.load_file[0])
90+
if args.extract_file is not None:
91+
self.extract(args.extract_file[0], args.bindings_dirs)
92+
if args.save_file is not None:
93+
self.save(args.save_file[0])
94+
if args.export_header is not None:
95+
self.export_header(args.export_header[0])
96+
if args.print_it:
97+
pprint(self._edts)
98+
99+
return 0
100+
101+
def main():
    # Return the exit status of callable_main() instead of dropping it,
    # so callers (or a future sys.exit(main()) wrapper) can propagate it.
    return EDTSDatabase().callable_main(sys.argv[1:])
# Allow the module to be executed as a command line script.
if __name__ == '__main__':
    main()
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
#
2+
# Copyright (c) 2017 Bobby Noelte
3+
#
4+
# SPDX-License-Identifier: Apache-2.0
5+
#
6+
7+
# Empty to allow all modules to be imported
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,217 @@
1+
#!/usr/bin/env python3
2+
#
3+
# Copyright (c) 2017, Linaro Limited
4+
# Copyright (c) 2018, Bobby Noelte
5+
#
6+
# SPDX-License-Identifier: Apache-2.0
7+
#
8+
9+
import collections
import collections.abc
import fnmatch
import os
import re
from pathlib import Path

import yaml
15+
16+
##
# @brief YAML loader for device tree bindings.
#
# Extends the PyYAML loader with '!include' / '!import' handling and
# with collapsing of the 'inherits' sections of binding files.
#
class Binder(yaml.Loader):

    ##
    # List of all yaml files available for yaml loaders
    # of this class. Must be preset before the first
    # load operation.
    _files = []

    ##
    # Files that are already included.
    # Must be reset on the load of every new binding.
    _included = []

    @staticmethod
    def dict_merge(dct, merge_dct):
        # from https://gist.github.com/angstwad/bf22d1822c38a92ec0a9

        """ Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
        updating only top-level keys, dict_merge recurses down into dicts nested
        to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
        ``dct``.
        :param dct: dict onto which the merge is executed
        :param merge_dct: dct merged into dct
        :return: None
        """
        for k, v in merge_dct.items():
            # Fixed: collections.Mapping was removed in Python 3.10;
            # the ABC lives in collections.abc.
            if (k in dct and isinstance(dct[k], dict)
                    and isinstance(merge_dct[k], collections.abc.Mapping)):
                Binder.dict_merge(dct[k], merge_dct[k])
            else:
                if k in dct and dct[k] != merge_dct[k]:
                    print("binder.py: Merge of '{}': '{}' overwrites '{}'.".format(
                        k, merge_dct[k], dct[k]))
                dct[k] = merge_dct[k]

    @classmethod
    def _traverse_inherited(cls, node):
        """ Recursive overload procedure inside ``node``
        ``inherits`` section is searched for and used as node base when found.
        Base values are then overloaded by node values
        and some consistency checks are done.
        :param node:
        :return: node
        """

        # do some consistency checks. Especially id is needed for further
        # processing. title must be first to check.
        if 'title' not in node:
            # If 'title' is missing, make fault finding more easy.
            # Give a hint what node we are looking at.
            print("binder.py: node without 'title' -", node)
        for prop in ('title', 'version', 'description'):
            if prop not in node:
                node[prop] = "<unknown {}>".format(prop)
                print("binder.py: WARNING:",
                      "'{}' property missing in".format(prop),
                      "'{}' binding. Using '{}'.".format(node['title'],
                                                         node[prop]))

        # warn if we have an 'id' field
        if 'id' in node:
            print("binder.py: WARNING: id field set",
                  "in '{}', should be removed.".format(node['title']))

        if 'inherits' in node:
            if isinstance(node['inherits'], list):
                inherits_list = node['inherits']
            else:
                inherits_list = [node['inherits'],]
            node.pop('inherits')
            for inherits in inherits_list:
                # Inherited bindings may inherit themselves - recurse.
                if 'inherits' in inherits:
                    inherits = cls._traverse_inherited(inherits)
                # 'type' entries of node and inherited node are combined
                # into a single list instead of being overwritten.
                if 'type' in inherits:
                    if 'type' not in node:
                        node['type'] = []
                    if not isinstance(node['type'], list):
                        node['type'] = [node['type'],]
                    if isinstance(inherits['type'], list):
                        node['type'].extend(inherits['type'])
                    else:
                        node['type'].append(inherits['type'])

                # type, title, description, version of inherited node
                # are overwritten by intention. Remove to prevent dct_merge to
                # complain about duplicates.
                inherits.pop('type', None)
                inherits.pop('title', None)
                inherits.pop('version', None)
                inherits.pop('description', None)
                cls.dict_merge(inherits, node)
                node = inherits
        return node

    @classmethod
    def _collapse_inherited(cls, bindings_list):
        # Collapse the 'inherits' information of every binding in place
        # into a shallow copy of the bindings dictionary.
        collapsed = dict(bindings_list)

        for k, v in collapsed.items():
            v = cls._traverse_inherited(v)
            collapsed[k] = v

        return collapsed

    ##
    # @brief Get bindings for given compatibles.
    #
    # @param compatibles
    # @param bindings_paths directories to search for binding files
    # @return dictionary of bindings found
    @classmethod
    def bindings(cls, compatibles, bindings_paths):
        # find unique set of compatibles across all active nodes
        s = set()
        for k, v in compatibles.items():
            if isinstance(v, list):
                for item in v:
                    s.add(item)
            else:
                s.add(v)

        # scan YAML files and find the ones we are interested in
        # We add our own bindings directory last (lowest priority)
        # We only allow one binding file with the same name
        # NOTE(review): this appends to the caller's bindings_paths list
        # in place - confirm callers do not reuse it afterwards.
        bindings_paths.append(Path(Path(__file__).resolve().parent,
                                   'bindings'))
        cls._files = []
        binding_files = []
        for path in bindings_paths:
            for root, dirnames, filenames in os.walk(str(path)):
                for filename in fnmatch.filter(filenames, '*.yaml'):
                    if not filename in binding_files:
                        binding_files.append(filename)
                        cls._files.append(os.path.join(root, filename))

        bindings_list = {}
        file_load_list = set()
        for file in cls._files:
            for line in open(file, 'r', encoding='utf-8'):
                # Fixed: the previous pattern '^\s+constraint:*' matched
                # zero or more colons, so a 'constraint' line without a
                # colon also matched and the split below raised
                # IndexError. Require the colon and use a raw string.
                if re.search(r'^\s+constraint:', line):
                    c = line.split(':')[1].strip()
                    c = c.strip('"')
                    if c in s:
                        if file not in file_load_list:
                            file_load_list.add(file)
                            with open(file, 'r', encoding='utf-8') as yf:
                                # Reset inclusion tracking per binding so
                                # only true cycles are reported.
                                cls._included = []
                                bindings_list[c] = yaml.load(yf, cls)

        # collapse the bindings inherited information before return
        return cls._collapse_inherited(bindings_list)

    ##
    # @brief Create a loader and guard against circular inclusion.
    #
    # @param stream open file object of the yaml file to load
    def __init__(self, stream):
        filepath = os.path.realpath(stream.name)
        if filepath in self._included:
            # Fixed 'Error::' double colon typo in the message.
            print("Error: circular inclusion for file name '{}'".
                  format(stream.name))
            raise yaml.constructor.ConstructorError
        self._included.append(filepath)
        super(Binder, self).__init__(stream)
        Binder.add_constructor('!include', Binder._include)
        Binder.add_constructor('!import', Binder._include)

    ##
    # @brief Constructor callback for '!include' / '!import' tags.
    #
    # @param node yaml node following the tag (scalar, sequence or mapping)
    # @return loaded content of the included file(s)
    def _include(self, node):
        if isinstance(node, yaml.ScalarNode):
            return self._extract_file(self.construct_scalar(node))

        elif isinstance(node, yaml.SequenceNode):
            result = []
            for filename in self.construct_sequence(node):
                result.append(self._extract_file(filename))
            return result

        elif isinstance(node, yaml.MappingNode):
            result = {}
            # Fixed: dict.iteritems() is Python 2 only and raises
            # AttributeError on Python 3; use items().
            for k, v in self.construct_mapping(node).items():
                result[k] = self._extract_file(v)
            return result

        else:
            print("Error: unrecognised node type in !include statement")
            raise yaml.constructor.ConstructorError

    ##
    # @brief Resolve an included file name against the known yaml files.
    #
    # @param filename file name given in the !include statement
    # @return loaded content of the resolved file
    def _extract_file(self, filename):
        filepaths = [filepath for filepath in self._files if filepath.endswith(filename)]
        if len(filepaths) == 0:
            print("Error: unknown file name '{}' in !include statement".
                  format(filename))
            raise yaml.constructor.ConstructorError
        elif len(filepaths) > 1:
            # multiple candidates for filename - prefer exact base name
            # matches over mere suffix matches.
            files = []
            for filepath in filepaths:
                if os.path.basename(filename) == os.path.basename(filepath):
                    files.append(filepath)
            if len(files) > 1:
                print("Error: multiple candidates for file name '{}' in !include statement".
                      format(filename), filepaths)
                raise yaml.constructor.ConstructorError
            filepaths = files
        with open(filepaths[0], 'r', encoding='utf-8') as f:
            return yaml.load(f, Binder)

0 commit comments

Comments
 (0)