Skip to content
42 changes: 26 additions & 16 deletions workspace_tools/build_release.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@

from workspace_tools.build_api import build_mbed_libs
from workspace_tools.build_api import write_build_report
from workspace_tools.targets import TARGET_MAP
from workspace_tools.targets import TARGET_MAP, TARGET_NAMES
from workspace_tools.test_exporters import ReportExporter, ResultExporterType
from workspace_tools.test_api import SingleTestRunner
from workspace_tools.test_api import singletest_in_cli_mode
Expand Down Expand Up @@ -126,7 +126,7 @@
('ARM_MPS2_M3' , ('ARM',)),
('ARM_MPS2_M4' , ('ARM',)),
('ARM_MPS2_M7' , ('ARM',)),
('ARM_MPS2_BEID' , ('ARM',)),
('ARM_IOTSS_BEID' , ('ARM',)),

('RZ_A1H' , ('ARM', 'GCC_ARM', 'IAR')),

Expand Down Expand Up @@ -209,19 +209,25 @@
"targets": {}
}

if options.toolchains:
print "Only building using the following toolchains: %s" % (options.toolchains)

for target_name, toolchain_list in OFFICIAL_MBED_LIBRARY_BUILD:
toolchains = None
if platforms is not None and not target_name in platforms:
print("Excluding %s from release" % target_name)
continue

if target_name not in TARGET_NAMES:
        print "Target '%s' is not a valid target. Excluding from release" % target_name
continue

if options.official_only:
toolchains = (getattr(TARGET_MAP[target_name], 'default_toolchain', 'ARM'),)
else:
toolchains = toolchain_list

if options.toolchains:
print "Only building using the following toolchains: %s" % (options.toolchains)
toolchainSet = set(toolchains)
toolchains = toolchainSet.intersection(set((options.toolchains).split(',')))

Expand All @@ -233,24 +239,28 @@

test_spec["targets"][target_name] = toolchains

single_test = SingleTestRunner(_muts=mut,
_opts_report_build_file_name=options.report_build_file_name,
_test_spec=test_spec,
_opts_test_by_names=",".join(test_names),
_opts_verbose=options.verbose,
_opts_only_build_tests=True,
_opts_suppress_summary=True,
_opts_jobs=options.jobs,
_opts_include_non_automated=True,
_opts_build_report=build_report,
_opts_build_properties=build_properties)
# Runs test suite in CLI mode
test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, new_build_report, new_build_properties = single_test.execute()
single_test = SingleTestRunner(_muts=mut,
_opts_report_build_file_name=options.report_build_file_name,
_test_spec=test_spec,
_opts_test_by_names=",".join(test_names),
_opts_verbose=options.verbose,
_opts_only_build_tests=True,
_opts_suppress_summary=True,
_opts_jobs=options.jobs,
_opts_include_non_automated=True,
_opts_build_report=build_report,
_opts_build_properties=build_properties)
# Runs test suite in CLI mode
test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, new_build_report, new_build_properties = single_test.execute()
else:
for target_name, toolchain_list in OFFICIAL_MBED_LIBRARY_BUILD:
if platforms is not None and not target_name in platforms:
print("Excluding %s from release" % target_name)
continue

if target_name not in TARGET_NAMES:
            print "Target '%s' is not a valid target. Excluding from release" % target_name
continue

if options.official_only:
toolchains = (getattr(TARGET_MAP[target_name], 'default_toolchain', 'ARM'),)
Expand Down
12 changes: 6 additions & 6 deletions workspace_tools/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,13 +30,13 @@
armcc = "standalone" # "keil", or "standalone", or "ds-5"

if armcc == "keil":
ARM_PATH = "C:/Keil_4_54/ARM"
ARM_BIN = join(ARM_PATH, "BIN40")
ARM_INC = join(ARM_PATH, "RV31", "INC")
ARM_LIB = join(ARM_PATH, "RV31", "LIB")
ARM_PATH = "C:/Keil_v5/ARM/ARMCC"
ARM_BIN = join(ARM_PATH, "bin")
ARM_INC = join(ARM_PATH, "include")
ARM_LIB = join(ARM_PATH, "lib")

elif armcc == "standalone":
ARM_PATH = "C:/Program Files/ARM/armcc_4.1_791"
ARM_PATH = "C:/Program Files (x86)/ARM_Compiler_5.06u1"
ARM_BIN = join(ARM_PATH, "bin")
ARM_INC = join(ARM_PATH, "include")
ARM_LIB = join(ARM_PATH, "lib")
Expand All @@ -57,7 +57,7 @@
GCC_CR_PATH = "C:/code_red/RedSuite_4.2.0_349/redsuite/Tools/bin"

# IAR
IAR_PATH = "C:/Program Files (x86)/IAR Systems/Embedded Workbench 7.0/arm"
IAR_PATH = "C:/Program Files (x86)/IAR Systems/Embedded Workbench 7.3/arm"

# Goanna static analyser. Please overload it in private_settings.py
GOANNA_PATH = "c:/Program Files (x86)/RedLizards/Goanna Central 3.2.3/bin"
Expand Down
15 changes: 8 additions & 7 deletions workspace_tools/tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -1119,13 +1119,14 @@ def __init__(self, n):
self.__dict__.update(TESTS[n])

def is_supported(self, target, toolchain):
if hasattr(self, 'mcu') and not target in self.mcu:
return False
if hasattr(self, 'exclude_mcu') and target in self.exclude_mcu:
return False
if not hasattr(self, 'supported'):
return True
return (target in self.supported) and (toolchain in self.supported[target])
return True
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This info is still pretty useful, even if we don't necessarily want to use it at times.

I'd suggest adding an option in singletest.py to ignore the is_supported function here, but still keep the ability to use this info.

I'd suggest adding the --include-unsupported option to singletest.py. That way we keep backward compatibility but we add the ability to build all tests. We'd simply just have to check for this option here: https://github.com/mbedmicro/mbed/blob/1f340445d5333b485923beb9a7216fc54a8efeb4/workspace_tools/test_api.py#L652

If this sounds like a good option, then I'd suggest reverting the changes here (uncomment the commented code), merge the PR, and I can submit a change to singletest.py

#if hasattr(self, 'mcu') and not target in self.mcu:
# return False
#if hasattr(self, 'exclude_mcu') and target in self.exclude_mcu:
# return False
#if not hasattr(self, 'supported'):
# return True
#return (target in self.supported) and (toolchain in self.supported[target])

def get_description(self):
if self.description:
Expand Down
91 changes: 72 additions & 19 deletions workspace_tools/upload_results.py
Original file line number Diff line number Diff line change
Expand Up @@ -125,32 +125,84 @@ def add_project_runs(args):
project_run_data['hostOses_set'] = set()
project_run_data['hostOses_set'].add(args.host_os)

add_report(project_run_data, args.build_report, True, args.build_id, args.host_os)
if args.build_report:
add_report(project_run_data, args.build_report, True, args.build_id, args.host_os)

if (args.test_report):
if args.test_report:
add_report(project_run_data, args.test_report, False, args.build_id, args.host_os)

ts_data = format_project_run_data(project_run_data)
r = requests.post(urlparse.urljoin(args.url, "api/projectRuns"), headers=create_headers(args), json=ts_data)
finish_command('add-project-runs', r)
ts_data = format_project_run_data(project_run_data, args.limit)
total_result = True

total_parts = len(ts_data)
print "Uploading project runs in %d parts" % total_parts

for index, data in enumerate(ts_data):
r = requests.post(urlparse.urljoin(args.url, "api/projectRuns"), headers=create_headers(args), json=data)
print("add-project-runs part %d/%d" % (index + 1, total_parts), r.status_code, r.reason)
print(r.text)

if r.status_code >= 400:
total_result = False

if total_result:
print "'add-project-runs' completed successfully"
sys.exit(0)
else:
print "'add-project-runs' failed"
sys.exit(2)

def format_project_run_data(project_run_data):
def prep_ts_data():
    """Build an empty upload payload: an empty projectRuns list plus
    one empty set for each aggregate dimension collected per chunk."""
    fresh = {'projectRuns': []}
    # Every aggregate field starts as a set so duplicates collapse while
    # chunks are being filled; finish_ts_data later converts them to lists.
    for field in ('platforms', 'vendors', 'toolchains', 'names', 'hostOses'):
        fresh[field] = set()
    return fresh

for hostOs in project_run_data['projectRuns'].values():
for platform in hostOs.values():
for toolchain in platform.values():
for project in toolchain.values():
ts_data['projectRuns'].append(project)

ts_data['platforms'] = list(project_run_data['platforms_set'])
def finish_ts_data(ts_data, project_run_data):
ts_data['platforms'] = list(ts_data['platforms'])
ts_data['vendors'] = list(ts_data['vendors'])
ts_data['toolchains'] = list(ts_data['toolchains'])
ts_data['names'] = list(ts_data['names'])
ts_data['hostOses'] = list(ts_data['hostOses'])

# Add all vendors to every projectRun submission
# TODO Either add "vendor" to the "project_run_data"
# or remove "vendor" entirely from the viewer
ts_data['vendors'] = list(project_run_data['vendors_set'])
ts_data['toolchains'] = list(project_run_data['toolchains_set'])
ts_data['names'] = list(project_run_data['names_set'])
ts_data['hostOses'] = list(project_run_data['hostOses_set'])

def format_project_run_data(project_run_data, limit):
all_ts_data = []
current_limit_count = 0

ts_data = prep_ts_data()
ts_data['projectRuns'] = []

return ts_data
for hostOs_name, hostOs in project_run_data['projectRuns'].iteritems():
for platform_name, platform in hostOs.iteritems():
for toolchain_name, toolchain in platform.iteritems():
for project_name, project in toolchain.iteritems():
if current_limit_count >= limit:
finish_ts_data(ts_data, project_run_data)
all_ts_data.append(ts_data)
ts_data = prep_ts_data()
current_limit_count = 0

ts_data['projectRuns'].append(project)
ts_data['platforms'].add(platform_name)
ts_data['toolchains'].add(toolchain_name)
ts_data['names'].add(project_name)
ts_data['hostOses'].add(hostOs_name)
current_limit_count += 1

if current_limit_count > 0:
finish_ts_data(ts_data, project_run_data)
all_ts_data.append(ts_data)

return all_ts_data

def find_project_run(projectRuns, project):
keys = ['hostOs', 'platform', 'toolchain', 'project']
Expand Down Expand Up @@ -308,13 +360,14 @@ def main(arguments):

add_project_runs_parser = subparsers.add_parser('add-project-runs', help='add project runs to a build')
add_project_runs_parser.add_argument('-b', '--build-id', required=True, help='build id')
add_project_runs_parser.add_argument('-r', '--build-report', required=True, help='path to junit xml build report')
add_project_runs_parser.add_argument('-r', '--build-report', required=False, help='path to junit xml build report')
add_project_runs_parser.add_argument('-t', '--test-report', required=False, help='path to junit xml test report')
add_project_runs_parser.add_argument('-o', '--host-os', required=True, help='host os on which test was run')
add_project_runs_parser.add_argument('-l', '--limit', required=False, type=int, default=1000, help='Limit the number of project runs sent at a time to avoid HTTP errors (default is 1000)')
add_project_runs_parser.set_defaults(func=add_project_runs)

args = parser.parse_args(arguments)
args.func(args)

if __name__ == '__main__':
main(sys.argv[1:])
main(sys.argv[1:])