diff --git a/tests/cluecode/cluecode_test_utils.py b/tests/cluecode/cluecode_test_utils.py
index 68afa3e7b8b..466feaa4e86 100644
--- a/tests/cluecode/cluecode_test_utils.py
+++ b/tests/cluecode/cluecode_test_utils.py
@@ -216,7 +216,7 @@ def closure_test_function(*args, **kwargs):
                 '\ntest file: file://' + test_file + '\n'
             ) + expected_yaml

-            assert expected_yaml == results_yaml
+            assert results_yaml == expected_yaml

     data_file = test.data_file
     test_file = test.test_file
diff --git a/tests/cluecode/test_copyrights_basic.py b/tests/cluecode/test_copyrights_basic.py
index b0e577d7a44..a0c3c7986e7 100644
--- a/tests/cluecode/test_copyrights_basic.py
+++ b/tests/cluecode/test_copyrights_basic.py
@@ -20,51 +20,51 @@ class TestTextPreparation(FileBasedTesting):

     def test_strip_leading_numbers(self):
         a = '2.6.6 (r266:84297, Aug 24 2010, 18:46:32) [MSC v.1500 32 bit (Intel)] on win32'
-        assert a == copyrights_module.strip_leading_numbers(a)
+        assert copyrights_module.strip_leading_numbers(a) == a

         a = '26 6 24 2010, 18:46:32) [MSC v.1500 32 bit (Intel)] on 12'
         expected = '2010, 18:46:32) [MSC v.1500 32 bit (Intel)] on 12'
-        assert expected == copyrights_module.strip_leading_numbers(a)
+        assert copyrights_module.strip_leading_numbers(a) == expected

     def test_prepare_text_line(self):
         cp = 'test (C) all rights reserved'
         result = copyrights_module.prepare_text_line(cp)
-        assert 'test (c) all rights reserved' == result
+        assert result == 'test (c) all rights reserved'

     def test_prepare_text_line_debian(self):
         cp = 'Parts Copyright (c) 1992 Uri Blumenthal, IBM'
         result = copyrights_module.prepare_text_line(cp)
-        assert 'Parts Copyright (c) 1992 Uri Blumenthal, IBM' == result
+        assert result == 'Parts Copyright (c) 1992 Uri Blumenthal, IBM'

     def test_prepare_text_line_does_not_truncate_transliterable_unicode(self):
         cp = u'Muła'
         result = copyrights_module.prepare_text_line(cp)
-        assert 'Mula' == result
+        assert result == 'Mula'

     def test_strip_markup(self):
         cp = 'Parts Copyright (c) 1992 Uri Blumenthal, IBM'
         result = copyrights_module.strip_markup(cp)
-        assert 'Parts Copyright (c) 1992 Uri Blumenthal, IBM' == result
+        assert result == 'Parts Copyright (c) 1992 Uri Blumenthal, IBM'

     def test_prepare_text_line_removes_C_comments(self):
         cp = '/* Copyright 1996-2005, 2008-2011 by */'
         result = copyrights_module.prepare_text_line(cp)
-        assert 'Copyright 1996-2005, 2008-2011 by' == result
+        assert result == 'Copyright 1996-2005, 2008-2011 by'

     def test_prepare_text_line_removes_C_comments2(self):
         cp = '/* David Turner, Robert Wilhelm, and Werner Lemberg. */'
         result = copyrights_module.prepare_text_line(cp)
-        assert 'David Turner, Robert Wilhelm, and Werner Lemberg.' == result
+        assert result == 'David Turner, Robert Wilhelm, and Werner Lemberg.'

     def test_prepare_text_line_removes_Cpp_comments(self):
         cp = '// David Turner, Robert Wilhelm, and Werner Lemberg. */'
         result = copyrights_module.prepare_text_line(cp)
-        assert 'David Turner, Robert Wilhelm, and Werner Lemberg.' == result
+        assert result == 'David Turner, Robert Wilhelm, and Werner Lemberg.'

     def test_prepare_text_line_does_not_damage_urls(self):
         cp = 'copyright (c) 2000 World Wide Web Consortium, http://www.w3.org'
         result = copyrights_module.prepare_text_line(cp)
-        assert 'copyright (c) 2000 World Wide Web Consortium, http://www.w3.org' == result
+        assert result == 'copyright (c) 2000 World Wide Web Consortium, http://www.w3.org'

     def test_is_end_of_statement(self):
         line = ''' "All rights reserved\\n"'''
@@ -75,7 +75,7 @@ def test_candidate_lines_simple(self):
         lines = [(1, ' test (C) all rights reserved')]
         result = list(copyrights_module.candidate_lines(lines))
         expected = [[(1, ' test (C) all rights reserved')]]
-        assert expected == result
+        assert result == expected

     def test_candidate_lines_complex(self):
         lines = '''
@@ -119,7 +119,7 @@ def test_candidate_lines_complex(self):
         ]

         result = list(copyrights_module.candidate_lines(enumerate(lines, 1)))
-        assert expected == result
+        assert result == expected

     def test_is_candidates_should_not_select_line_with_bare_full_year(self):
         line = '2012'
@@ -204,7 +204,7 @@ def test_detect(self):
             '(c) 2008',
         ]
         copyrights, _, _ = cluecode_test_utils.copyright_detector(location)
-        assert expected == copyrights
+        assert copyrights == expected

     def test_detect_with_lines(self):
         location = self.get_test_loc('copyrights_basic/essential_smoke-ibm_c.c')
@@ -216,7 +216,7 @@ def test_detect_with_lines(self):
             ('copyrights', u'(c) 2008', 8, 8)
         ]
         results = list(copyrights_module.detect_copyrights(location))
-        assert expected == results
+        assert results == expected

     def test_detect_with_lines_only_holders(self):
         location = self.get_test_loc('copyrights_basic/essential_smoke-ibm_c.c')
@@ -225,7 +225,7 @@ def test_detect_with_lines_only_holders(self):
             ('holders', u'Eclipse, IBM and others', 8, 8)
         ]
         results = list(copyrights_module.detect_copyrights(location, copyrights=False, authors=False))
-        assert expected == results
+        assert results == expected


 def check_detection_with_lines(expected, test_file):
@@ -241,7 +241,7 @@ def check_detection_with_lines(expected, test_file):
     )
     results = [(statement, start, end)
                for _t, statement, start, end in detections]
-    assert expected == results
+    assert results == expected


 class TestCopyrightLinesDetection(FileBasedTesting):
diff --git a/tests/cluecode/test_copyrights_fosso.py b/tests/cluecode/test_copyrights_fosso.py
index 2680582f051..7083e19af56 100644
--- a/tests/cluecode/test_copyrights_fosso.py
+++ b/tests/cluecode/test_copyrights_fosso.py
@@ -54,7 +54,7 @@ def build_copyright_test_methods_with_fossology_data():
             expected_files.append(tf)
             files_to_test.append(tf.replace('_raw', ''))

-    assert sorted(test_files) == sorted(files_to_test + expected_files)
+    assert sorted(files_to_test + expected_files) == sorted(test_files)

     copyregex = re.compile('<s>(.*?)</s>', re.DOTALL | re.UNICODE)  # NOQA
     for expected_file, test_file in zip(expected_files, files_to_test):
@@ -132,14 +132,14 @@ def copyright_test_method(self):
         copyrights, _authors, _holders = cluecode_test_utils.copyright_detector(test_file_loc)

         try:
-            assert expected == copyrights
+            assert copyrights == expected
         except:
             failure_trace = [
                 'Failed to detect copyright in: file://' + test_file_loc,
                 '\n',
                 'expected as file://' + expected_file_loc,
                 '\n',
             ] + expected
-            assert failure_trace == copyrights
+            assert copyrights == failure_trace

     return copyright_test_method
diff --git a/tests/cluecode/test_finder.py b/tests/cluecode/test_finder.py
index 90ab869bd10..2a743a7ae52 100644
--- a/tests/cluecode/test_finder.py
+++ b/tests/cluecode/test_finder.py
@@ -54,7 +54,7 @@ def test_emails_regex(self):
             u'linux@3ware.com'
         ]
         result = re.findall(finder.emails_regex(), test_input)
-        assert expected == result
+        assert result == expected

     def test_find_emails_in_c(self):
         test_file = self.get_test_loc('finder/email/3w-xxxx.c')
@@ -64,19 +64,19 @@ def test_find_emails_in_c(self):
             'andre@suse.com'
         ]
         result = find_emails_tester(test_file)
-        assert expected == result
+        assert result == expected

     def test_find_emails_in_python1(self):
         test_file = self.get_test_loc('finder/email/jardiff.py')
         expected = ['jp_py@demonseed.net']
         result = find_emails_tester(test_file)
-        assert expected == result
+        assert result == expected

     def test_find_emails_in_python2(self):
         test_file = self.get_test_loc('finder/email/thomas.py')
         expected = ['amir@divmod.org']
         result = find_emails_tester(test_file)
-        assert expected == result
+        assert result == expected

     def test_find_emails_does_not_return_bogus_emails(self):
         lines = [
@@ -96,37 +96,37 @@ def test_find_emails_does_not_return_bogus_emails(self):
         ]
         expected = []
         result = find_emails_tester(lines)
-        assert expected == result
+        assert result == expected

     def test_find_emails_does_not_return_png(self):
         lines = ['navigation-logo@2x.png']
         expected = []
         result = find_emails_tester(lines)
-        assert expected == result
+        assert result == expected

     def test_find_emails_does_not_return_incomplete_emails_or_example_emails(self):
         lines = ['user@...', 'thomas@...', '*@example.com', 'user@localhost']
         expected = []
         result = find_emails_tester(lines)
-        assert expected == result
+        assert result == expected

     def test_find_emails_filters_unique_by_default(self):
         lines = ['user@me.com', 'user@me.com']
         expected = ['user@me.com']
         result = find_emails_tester(lines)
-        assert expected == result
+        assert result == expected

     def test_find_emails_does_not_filter_unique_if_requested(self):
         lines = ['user@me.com', 'user@me.com']
         expected = ['user@me.com', 'user@me.com']
         result = find_emails_tester(lines, unique=False)
-        assert expected == result
+        assert result == expected

     def test_find_emails_does_return_line_number(self):
         lines = ['user@me.com', 'user2@me.com']
         expected = [('user@me.com', 1), ('user2@me.com', 2)]
         result = find_emails_tester(lines, with_lineno=True)
-        assert expected == result
+        assert result == expected

     def test_find_emails_does_not_return_junk(self):
         lines = '''
@@ -153,13 +153,13 @@ def test_find_emails_does_not_return_junk(self):
             u'trivial@kernel.org'
         ]
         result = find_emails_tester(lines, with_lineno=False)
-        assert expected == result
+        assert result == expected

     def test_emails_does_filter_junk_domains(self):
         test_file = self.get_test_loc('finder/email/Content.json')
         expected = []
         result = find_emails_tester(test_file)
-        assert expected == result
+        assert result == expected

     def test_emails_for_ignored_hosts(self):
         test_string = '''
@@ -175,7 +175,7 @@ def test_emails_for_ignored_hosts(self):
             u'efg@many.org'
         ]
         result = find_emails_tester(test_string, with_lineno=False)
-        assert expected == result
+        assert result == expected


 class TestUrl(FileBasedTesting):
@@ -185,7 +185,7 @@ def test_urls_regex_without_http(self):
         result = re.match(finder.urls_regex(), u'www.something.domain.tld').group()
         expected = u'www.something.domain.tld'
-        assert expected == result
+        assert result == expected

     def test_urls_regex(self):
         test_file = self.get_test_loc('finder/url/BeautifulSoup.py')
@@ -198,7 +198,7 @@ def test_urls_regex(self):
             u'http://chardet.feedparser.org/',
             u'http://cjkpython.i18n.org/',
         ]
-        assert expected == re.findall(finder.urls_regex(), test_input)
+        assert re.findall(finder.urls_regex(), test_input) == expected

     def test_canonical_url(self):
         data = (
@@ -214,7 +214,7 @@ def test_canonical_url(self):
         )

         for test, expected in data:
-            assert expected == finder.canonical_url(test)
+            assert finder.canonical_url(test) == expected

     def test_find_urls_returns_unique(self):
         lines = [
@@ -256,7 +256,7 @@ def test_find_urls_returns_unique(self):
             u'http://kernelnewbies.org/',
         ]
         result = find_urls_tester(lines)
-        assert expected == result
+        assert result == expected

     def test_find_urls_does_not_return_local_urls(self):
         lines = [
@@ -272,7 +272,7 @@ def test_find_urls_does_not_return_local_urls(self):
         ]
         expected = []
         result = find_urls_tester(lines)
-        assert expected == result
+        assert result == expected

     def test_find_urls_does_not_return_local_ip(self):
         lines = [
@@ -297,7 +297,7 @@ def test_find_urls_does_not_return_local_ip(self):
         ]
         expected = [u'http://172.32.120.155/']
         result = find_urls_tester(lines)
-        assert expected == result
+        assert result == expected

     def test_is_good_host(self):
         assert finder.is_good_host('172.32.120.155')
@@ -305,19 +305,19 @@ def test_is_good_host(self):
     def test_url_host_domain(self):
         result = finder.url_host_domain('http://svn.codehaus.org')
         expected = ('svn.codehaus.org', 'codehaus.org',)
-        assert expected == result
+        assert result == expected

     def test_find_urls_filters_bogus_url(self):
         lines = [u'http://__________________']
         expected = []
         result = find_urls_tester(lines)
-        assert expected == result
+        assert result == expected

     def test_find_urls_with_square_brackets_from_trac_wiki_html(self):
         lines = ['title="Link: [http://www.somedo.com/ Example]"']
         expected = ['http://www.somedo.com/']
         result = find_urls_tester(lines)
-        assert expected == result
+        assert result == expected

     def test_find_urls_in_pom(self):
         lines = [
@@ -352,7 +352,7 @@ def test_find_urls_in_pom(self):
             u'https://svn.sourceforge.net/svn/jtidy/trunk/jtidy/',
         ]
         result = find_urls_tester(lines)
-        assert expected == result
+        assert result == expected

     def test_find_urls_in_file_with_markup_in_code(self):
         test_file = self.get_test_loc('finder/url/markup_in_code.c')
@@ -367,7 +367,7 @@ def test_find_urls_in_file_with_markup_in_code(self):
             u'http://xml.libexpat.org/ns1'
         ]
         result = find_urls_tester(test_file)
-        assert expected == result
+        assert result == expected

     def test_find_urls_does_not_return_duplicate_urls_by_default(self):
         test_file = self.get_test_loc('finder/url/nodupe.htm')
@@ -377,13 +377,13 @@ def test_find_urls_does_not_return_duplicate_urls_by_default(self):
             u'http://www.edgewall.org/',
         ]
         result = find_urls_tester(test_file)
-        assert expected == result
+        assert result == expected

     def test_find_urls__does_not_return_junk_urls(self):
         test_file = self.get_test_loc('finder/url/junk_urls.c')
         expected = []
         result = find_urls_tester(test_file)
-        assert expected == result
+        assert result == expected

     def test_find_urls_detects_urls_correcty_in_html(self):
         test_file = self.get_test_loc('finder/url/some_html.htm')
@@ -440,7 +440,7 @@ def test_find_urls_detects_urls_correcty_in_html(self):
             u'http://www.edgewall.org/'
         ]
         result = find_urls_tester(test_file)
-        assert expected == result
+        assert result == expected

     def test_find_urls_without_scheme_in_lines(self):
         lines = [
@@ -454,7 +454,7 @@ def test_find_urls_without_scheme_in_lines(self):
             u'http://www.programming-with-objects.com/',
         ]
         result = find_urls_tester(lines)
-        assert expected == result
+        assert result == expected

     def test_find_urls_without_scheme_in_python(self):
         test_file = self.get_test_loc('finder/url/no-scheme.py')
@@ -464,13 +464,13 @@ def test_find_urls_without_scheme_in_python(self):
             u'http://www.programming-with-objects.com/',
         ]
         result = find_urls_tester(test_file)
-        assert expected == result
+        assert result == expected

     def test_find_urls_filters_invalid_urls(self):
         test_file = self.get_test_loc('finder/url/truncated_url')
         result = find_urls_tester(test_file)
         expected = []
-        assert expected == result
+        assert result == expected

     def test_find_urls_with_fragments(self):
         test_file = self.get_test_loc('finder/url/ABOUT')
@@ -481,7 +481,7 @@ def test_find_urls_with_fragments(self):
             u'http://pypi.python.org/packages/2.4/P/Pygments/Pygments-0.11.1-py2.4.egg#md5=52d7a46a91a4a426f8fbc681c5c6f1f5',
         ]
         result = find_urls_tester(test_file)
-        assert expected == result
+        assert result == expected

     def test_find_urls_in_python(self):
         test_file = self.get_test_loc('finder/url/BeautifulSoup.py')
@@ -492,37 +492,37 @@ def test_find_urls_in_python(self):
             u'http://www.crummy.com/software/BeautifulSoup/documentation.html',
         ]
         result = find_urls_tester(test_file)
-        assert expected == result
+        assert result == expected

     def test_find_urls_in_java(self):
         test_file = self.get_test_loc('finder/url/IMarkerActionFilter.java')
         expected = [u'http://www.eclipse.org/legal/epl-v10.html']
         result = find_urls_tester(test_file)
-        assert expected == result
+        assert result == expected

     def test_find_urls_filters_unique_by_default(self):
         lines = ['http://www.me.com', 'http://www.me.com']
         expected = ['http://www.me.com/']
         result = find_urls_tester(lines)
-        assert expected == result
+        assert result == expected

     def test_find_urls_does_not_filter_unique_if_requested(self):
         lines = ['http://www.me.com', 'http://www.me.com']
         expected = ['http://www.me.com/', 'http://www.me.com/']
         result = find_urls_tester(lines, unique=False)
-        assert expected == result
+        assert result == expected

     def test_find_urls_does_return_line_number(self):
         lines = ['http://www.me.com', 'http://www.me2.com']
         expected = [('http://www.me.com/', 1), ('http://www.me2.com/', 2)]
         result = find_urls_tester(lines, with_lineno=True)
-        assert expected == result
+        assert result == expected

     def test_find_urls_finds_git_urls(self):
         lines = ['git@github.com:christophercantu/pipeline.git', ]
         expected = ['git@github.com:christophercantu/pipeline.git']
         result = find_urls_tester(lines)
-        assert expected == result
+        assert result == expected

     def test_find_urls_does_not_crash_on_weird_urls(self):
         lines = [
@@ -531,12 +531,12 @@ def test_find_urls_does_not_crash_on_weird_urls(self):
         ]
         expected = []
         result = find_urls_tester(lines)
-        assert expected == result
+        assert result == expected

     def test_find_urls_in_classfiles_does_not_return_junk_urls(self):
         test_file = self.get_test_loc('finder/url/XMLConstants.class')
         result = find_urls_tester(test_file)
-        assert [] == result
+        assert result == []

     def test_misc_valid_urls(self):
         # set of good URLs from https://mathiasbynens.be/demo/url-regex
@@ -551,7 +551,7 @@ def test_misc_valid_urls(self):
         '''
         for test in urls.split():
             result = [val for val, _ln in finder.find_urls([test])]
-            assert [test] == result
+            assert result == [test]

     def test_misc_valid_urls_reported_with_trailing_slash(self):
         # set of good URLs from https://mathiasbynens.be/demo/url-regex
@@ -564,7 +564,7 @@ def test_misc_valid_urls_reported_with_trailing_slash(self):
         '''
         for test in urls.split():
             result = [val for val, _ln in finder.find_urls([test])]
-            assert [test + u'/'] == result
+            assert result == [test + u'/']

     @expectedFailure
     def test_misc_valid_unicode_or_punycode_urls_that_should_pass(self):
@@ -582,7 +582,7 @@ def test_misc_valid_unicode_or_punycode_urls_that_should_pass(self):
         '''
         for test in urls.split():
             result = [val for val, _ln in finder.find_urls([test])]
-            assert [test] == result
+            assert result == [test]

     @expectedFailure
     def test_misc_valid_urls_that_should_pass(self):
@@ -596,7 +596,7 @@ def test_misc_valid_urls_that_should_pass(self):
         '''
         for test in urls.split():
             result = [val for val, _ln in finder.find_urls([test])]
-            assert [test] == result
+            assert result == [test]

     def test_example_dot_com_valid_urls_return_nothing(self):
         urls = u'''
@@ -614,7 +614,7 @@ def test_example_dot_com_valid_urls_return_nothing(self):
         '''
         for test in urls.split():
             result = [val for val, _ln in finder.find_urls([test])]
-            assert [] == result
+            assert result == []

     def test_misc_invalid_urls(self):
         # set of non URLs from https://mathiasbynens.be/demo/url-regex
@@ -659,7 +659,7 @@ def test_misc_invalid_urls_that_should_not_crash(self):
         '''
         for test in urls.split():
             result = [val for val, _ln in finder.find_urls([test])]
-            assert [] == result
+            assert result == []

     def test_misc_invalid_urls_that_are_still_detected_and_may_not_be_really_invalid(self):
         # set of non URLs from https://mathiasbynens.be/demo/url-regex
@@ -679,7 +679,7 @@ def test_misc_invalid_urls_that_are_still_detected_and_normalized(self):
         '''
         for test in urls.split():
             result = [val for val, _ln in finder.find_urls([test])]
-            assert [test] == result
+            assert result == [test]

     def test_invalid_urls_are_not_detected(self):
         # set of non URLs from https://mathiasbynens.be/demo/url-regex
@@ -689,7 +689,7 @@ def test_invalid_urls_are_not_detected(self):
         '''
         for test in urls.split():
             result = [val for val, _ln in finder.find_urls([test])]
-            assert [] == result
+            assert result == []

     def test_misc_invalid_urls_that_should_not_be_detected(self):
         # At least per this set of non URLs from https://mathiasbynens.be/demo/url-regex
@@ -729,7 +729,7 @@ def test_find_urls_in_go_does_not_crash_with_unicode_error(self):
             'https://tools.ietf.org/html/rfc6125#appendix-B.2'
         ]
         result = find_urls_tester(test_file)
-        assert expected == result
+        assert result == expected

     def test_find_urls_does_not_crash_on_mojibake_bytes(self):
         lines = [
@@ -737,13 +737,13 @@ def test_find_urls_does_not_crash_on_mojibake_bytes(self):
         ]
         expected = ['https://tools.ietf.org/html/rfc2821#section-4.1.2']
         result = find_urls_tester(lines)
-        assert expected == result
+        assert result == expected

     def test_find_in_go_does_not_crash_with_unicode_error(self):
         test_file = self.get_test_loc('finder/url/verify.go')
         patterns = [('urls', urls_regex(),)]
         for _key, url, _line, _lineno in find(test_file, patterns):
             assert type(url) == str


 class TestSearch(FileBasedTesting):
@@ -762,7 +762,7 @@ def test_search_is_non_unique_by_default(self):
         for test_file, expected in tests:
             location = os.path.join(test_dir, test_file)
             result = list(s for s, _ln in finder.find_pattern(location, pattern))
-            assert expected == result
+            assert result == expected

     def test_search_unique(self):
         test_dir = self.get_test_loc('finder/search', copy=True)
@@ -777,11 +777,11 @@ def test_search_unique(self):
         for test_file, expected in tests:
             location = os.path.join(test_dir, test_file)
             result = list(s for s, _ln in finder.find_pattern(location, pattern, unique=True))
-            assert expected == result
+            assert result == expected

     def test_search_in_binaries_with_line(self):
         test_file = self.get_test_loc('finder/binaries/gapi32.dll')
         pattern = r'This program ([\(\w\)\.\- ]+)'
         expected = [('cannot be run in DOS mode.', 1)]
         result = list(finder.find_pattern(test_file, pattern))
-        assert expected == result
+        assert result == expected
diff --git a/tests/formattedcode/test_output_csv.py b/tests/formattedcode/test_output_csv.py
index 73ac5557dbc..e6d24a8db3b 100644
--- a/tests/formattedcode/test_output_csv.py
+++ b/tests/formattedcode/test_output_csv.py
@@ -44,7 +44,7 @@ def check_json(result, expected_file, regen=False):
             reg.write(json.dumps(result, indent=4, separators=(',', ': ')))
     with io.open(expected_file, encoding='utf-8') as exp:
         expected = json.load(exp)
-    assert expected == result
+    assert result == expected


 def check_csvs(result_file, expected_file,
@@ -59,13 +59,13 @@ def check_csvs(result_file, expected_file,
         import shutil
         shutil.copy2(result_file, expected_file)
     expected_fields, expected = load_csv(expected_file)
-    assert expected_fields == result_fields
+    assert result_fields == expected_fields
     # then check results line by line for more compact results
     for exp, res in zip(sorted(expected , key=lambda d: d.items()), sorted(results , key=lambda d: d.items())):
         for ign in ignore_keys:
             exp.pop(ign, None)
             res.pop(ign, None)
-        assert exp == res
+        assert res == exp


 def load_csv(location):
@@ -196,8 +196,8 @@ def test_flatten_scan_with_no_keys_does_not_error_out():
         ('url', []),
         ('package', []),
     ])
-    assert expected_headers == headers
-    assert [] == result
+    assert headers == expected_headers
+    assert result == []


 @pytest.mark.scanslow
diff --git a/tests/formattedcode/test_output_spdx.py b/tests/formattedcode/test_output_spdx.py
index 858492eb1e8..f0532eb0290 100644
--- a/tests/formattedcode/test_output_spdx.py
+++ b/tests/formattedcode/test_output_spdx.py
@@ -127,7 +127,7 @@ def check_rdf_scan(expected_file, result_file, regen=False):
         expected = json.load(i)
         expected = load_and_clean_rdf(result_file)

-    assert json.dumps(expected, indent=2) == json.dumps(result, indent=2)
+    assert json.dumps(result, indent=2) == json.dumps(expected, indent=2)


 def load_and_clean_tv(location):
@@ -154,7 +154,7 @@ def check_tv_scan(expected_file, result_file, regen=False):
             o.write(result)

     expected = load_and_clean_tv(expected_file)
-    assert expected == result
+    assert result == expected


 def test_spdx_rdf_basic():
@@ -314,7 +314,7 @@ def test_spdx_rdf_with_empty_scan():
     run_scan_plain(args)
     expected = "\n"
     results = open(result_file).read()
-    assert expected == results
+    assert results == expected


 @pytest.mark.scanslow
diff --git a/tests/licensedcode/licensedcode_test_utils.py b/tests/licensedcode/licensedcode_test_utils.py
index 20e44de5099..aaf679b6e15 100644
--- a/tests/licensedcode/licensedcode_test_utils.py
+++ b/tests/licensedcode/licensedcode_test_utils.py
@@ -240,7 +240,7 @@ def closure_test_function(*args, **kwargs):
             failure_trace.append('file://{test_data_file}'.format(**locals()))

             # this assert will always fail and provide a detailed failure trace
-            assert '\n'.join(results) == '\n'.join(failure_trace)
+            assert '\n'.join(failure_trace) == '\n'.join(results)

     closure_test_function.__name__ = test_name
diff --git a/tests/licensedcode/test_detect.py b/tests/licensedcode/test_detect.py
index 1af9522e8b3..ccbb1d5a133 100644
--- a/tests/licensedcode/test_detect.py
+++ b/tests/licensedcode/test_detect.py
@@ -41,15 +41,15 @@ def test_match_does_not_return_matches_for_empty_query(self):
         idx = MiniLicenseIndex([Rule(stored_text='A one. A two. license A three.')])

         matches = idx.match(query_string='')
-        assert [] == matches
+        assert matches == []

         matches = idx.match(query_string=None)
-        assert [] == matches
+        assert matches == []

     def test_match_does_not_return_matches_for_junk_queries(self):
         idx = MiniLicenseIndex([Rule(stored_text='A one. a license two. license A three.')])
-        assert [] == idx.match(query_string=u'some other junk')
-        assert [] == idx.match(query_string=u'some junk')
+        assert idx.match(query_string=u'some other junk') == []
+        assert idx.match(query_string=u'some junk') == []

     def test_match_return_one_match_with_correct_offsets(self):
         idx = MiniLicenseIndex([
@@ -61,14 +61,14 @@ def test_match_return_one_match_with_correct_offsets(self):
         # 0 1 2 3 4 5 6 7 8

         matches = idx.match(query_string=querys)
-        assert 1 == len(matches)
+        assert len(matches) == 1
         match = matches[0]
         qtext, itext = get_texts(match)
-        assert 'one. A license two. A three.' == qtext
-        assert 'one license two three' == itext
+        assert qtext == 'one. A license two. A three.'
+        assert itext == 'one license two three'

-        assert Span(0, 3) == match.qspan
-        assert Span(0, 3) == match.ispan
+        assert match.qspan == Span(0, 3)
+        assert match.ispan == Span(0, 3)

     def test_match_can_match_exactly_rule_text_used_as_query(self):
         test_file = self.get_test_loc('detect/mit/mit.c')
@@ -76,13 +76,13 @@ def test_match_can_match_exactly_rule_text_used_as_query(self):
         idx = MiniLicenseIndex([rule])

         matches = idx.match(test_file)
-        assert 1 == len(matches)
+        assert len(matches) == 1
         match = matches[0]
-        assert rule == match.rule
-        assert Span(0, 85) == match.qspan
-        assert Span(0, 85) == match.ispan
-        assert 100 == match.coverage()
-        assert 100 == match.score()
+        assert match.rule == rule
+        assert match.qspan == Span(0, 85)
+        assert match.ispan == Span(0, 85)
+        assert match.coverage() == 100
+        assert match.score() == 100

     def test_match_matches_correctly_simple_exact_query_1(self):
         tf1 = self.get_test_loc('detect/mit/mit.c')
@@ -91,11 +91,11 @@ def test_match_matches_correctly_simple_exact_query_1(self):

         query_doc = self.get_test_loc('detect/mit/mit2.c')
         matches = idx.match(query_doc)
-        assert 1 == len(matches)
+        assert len(matches) == 1
         match = matches[0]
-        assert ftr == match.rule
-        assert Span(0, 85) == match.qspan
-        assert Span(0, 85) == match.ispan
+        assert match.rule == ftr
+        assert match.qspan == Span(0, 85)
+        assert match.ispan == Span(0, 85)

     def test_match_matches_correctly_simple_exact_query_across_query_runs(self):
         tf1 = self.get_test_loc('detect/mit/mit.c')
@@ -103,7 +103,7 @@ def test_match_matches_correctly_simple_exact_query_across_query_runs(self):
         idx = MiniLicenseIndex([ftr])
         query_doc = self.get_test_loc('detect/mit/mit3.c')
         matches = idx.match(query_doc)
-        assert 1 == len(matches)
+        assert len(matches) == 1

         match = matches[0]
         qtext, itext = get_texts(match)
@@ -130,7 +130,7 @@ def test_match_matches_correctly_simple_exact_query_across_query_runs(self):
             of the Software.
         '''
-        assert ' '.join(expected_qtext.split()) == ' '.join(qtext.split())
+        assert ' '.join(qtext.split()) == ' '.join(expected_qtext.split())

         expected_itext = u'''
             Permission is hereby granted free of charge to any person obtaining
@@ -142,7 +142,7 @@ def test_match_matches_correctly_simple_exact_query_across_query_runs(self):
             copyright notice and this permission notice shall be included in
             all copies or substantial portions of the Software
         '''.lower()
-        assert ' '.join(expected_itext.split()) == ' '.join(itext.split())
+        assert ' '.join(itext.split()) == ' '.join(expected_itext.split())

     def test_match_with_surrounding_junk_should_return_an_exact_match(self):
         tf1 = self.get_test_loc('detect/mit/mit.c')
@@ -165,7 +165,7 @@ def test_match_with_surrounding_junk_should_return_an_exact_match(self):
             // The above copyright "[add] [text]" notice and this permission notice shall be included in
             // all copies or substantial portions of the Software.
         '''.split()
-        assert expected_qtext == qtext.split()
+        assert qtext.split() == expected_qtext

         expected_itext = u'''
             permission is hereby granted free of charge to any person obtaining
@@ -177,19 +177,19 @@ def test_match_with_surrounding_junk_should_return_an_exact_match(self):
             copyright notice and this permission notice shall be included in
             all copies or substantial portions of the software
         '''.lower().split()
-        assert expected_itext == itext.split()
+        assert itext.split() == expected_itext

-        assert Span(0, 85) == match.qspan
-        assert Span(0, 85) == match.ispan
-        assert 95.56 == match.score()
+        assert match.qspan == Span(0, 85)
+        assert match.ispan == Span(0, 85)
+        assert match.score() == 95.56

     def test_match_to_single_word_does_not_have_zero_score(self):
         idx = MiniLicenseIndex(
             [Rule(stored_text='LGPL', license_expression='lgpl-2.0')]
         )
         matches = idx.match(query_string='LGPL')
-        assert 1 == len(matches)
-        assert 5.0 == matches[0].score()
+        assert len(matches) == 1
+        assert matches[0].score() == 5.0

     def test_match_to_threshold_words_has_hundred_score(self):
         threshold = 18
@@ -197,8 +197,8 @@ def test_match_to_threshold_words_has_hundred_score(self):
             [Rule(stored_text=' LGPL ' * threshold, license_expression='lgpl-2.0')]
         )
         matches = idx.match(query_string=' LGPL ' * threshold)
-        assert 1 == len(matches)
-        assert 100.0 == matches[0].score()
+        assert len(matches) == 1
+        assert matches[0].score() == 100.0

     def test_match_can_match_approximately(self):
         rule_file = self.get_test_loc('approx/mit/mit.c')
@@ -207,15 +207,15 @@ def test_match_can_match_approximately(self):

         query_doc = self.get_test_loc('approx/mit/mit4.c')
         matches = idx.match(query_doc)
-        assert 2 == len(matches)
+        assert len(matches) == 2
         m1 = matches[0]
         m2 = matches[1]
-        assert rule == m1.rule
-        assert rule == m2.rule
-        assert 100 == m1.coverage()
-        assert 100 == m2.coverage()
-        assert 95.56 == m1.score()
-        assert 93.48 == m2.score()
+        assert m1.rule == rule
+        assert m2.rule == rule
+        assert m1.coverage() == 100
+        assert m2.coverage() == 100
+        assert m1.score() == 95.56
+        assert m2.score() == 93.48

     def test_match_return_correct_positions_with_short_index_and_queries(self):
         idx = MiniLicenseIndex(
@@ -223,57 +223,57 @@ def test_match_return_correct_positions_with_short_index_and_queries(self):
         )
         matches = idx.match(query_string='MIT License')
-        assert 1 == len(matches)
+        assert len(matches) == 1

         qtext, itext = get_texts(matches[0])
-        assert 'MIT License' == qtext
-        assert 'mit license' == itext
-        assert Span(0, 1) == matches[0].qspan
-        assert Span(0, 1) == matches[0].ispan
+        assert qtext == 'MIT License'
+        assert itext == 'mit license'
+        assert matches[0].qspan == Span(0, 1)
+        assert matches[0].ispan == Span(0, 1)

         matches = idx.match(query_string='MIT MIT License')
-        assert 1 == len(matches)
+        assert len(matches) == 1

         qtext, itext = get_texts(matches[0])
-        assert 'MIT License' == qtext
-        assert 'mit license' == itext
-        assert Span(1, 2) == matches[0].qspan
-        assert Span(0, 1) == matches[0].ispan
+        assert qtext == 'MIT License'
+        assert itext == 'mit license'
+        assert matches[0].qspan == Span(1, 2)
+        assert matches[0].ispan == Span(0, 1)

         query_doc1 = 'do you think I am a mit license MIT License, yes, I think so'
         # # 0 1 2 3
         matches = idx.match(query_string=query_doc1)
-        assert 2 == len(matches)
+        assert len(matches) == 2

         qtext, itext = get_texts(matches[0])
-        assert 'mit license' == qtext
-        assert 'mit license' == itext
-        assert Span(0, 1) == matches[0].qspan
-        assert Span(0, 1) == matches[0].ispan
+        assert qtext == 'mit license'
+        assert itext == 'mit license'
+        assert matches[0].qspan == Span(0, 1)
+        assert matches[0].ispan == Span(0, 1)

         qtext, itext = get_texts(matches[1])
-        assert 'MIT License,' == qtext
-        assert 'mit license' == itext
-        assert Span(2, 3) == matches[1].qspan
-        assert Span(0, 1) == matches[1].ispan
+        assert qtext == 'MIT License,'
+        assert itext == 'mit license'
+        assert matches[1].qspan == Span(2, 3)
+        assert matches[1].ispan == Span(0, 1)

         query_doc2 = '''do you think I am a mit license
         MIT License
         yes, I think so'''
         matches = idx.match(query_string=query_doc2)
-        assert 2 == len(matches)
+        assert len(matches) == 2

         qtext, itext = get_texts(matches[0])
-        assert 'mit license' == qtext
-        assert 'mit license' == itext
-        assert Span(0, 1) == matches[0].qspan
-        assert Span(0, 1) == matches[0].ispan
+        assert qtext == 'mit license'
+        assert itext == 'mit license'
+        assert matches[0].qspan == Span(0, 1)
+        assert matches[0].ispan == Span(0, 1)

         qtext, itext = get_texts(matches[1])
-        assert 'MIT License' == qtext
-        assert 'mit license' == itext
-        assert Span(2, 3) == matches[1].qspan
-        assert Span(0, 1) == matches[1].ispan
+        assert qtext == 'MIT License'
+        assert itext == 'mit license'
+        assert matches[1].qspan == Span(2, 3)
+        assert matches[1].ispan == Span(0, 1)

     def test_match_simple_rule(self):
         tf1 = self.get_test_loc('detect/mit/t1.txt')
@@ -282,35 +282,35 @@ def test_match_simple_rule(self):
         query_doc = self.get_test_loc('detect/mit/t2.txt')
         matches = idx.match(query_doc)
-        assert 1 == len(matches)
+        assert len(matches) == 1
         match = matches[0]
-        assert Span(0, 240) == match.qspan
-        assert Span(0, 240) == match.ispan
-        assert (1, 27,) == match.lines()
-        assert 100 == match.coverage()
-        assert 100 == match.score()
+        assert match.qspan == Span(0, 240)
+        assert match.ispan == Span(0, 240)
+        assert match.lines() == (1, 27,)
+        assert match.coverage() == 100
+        assert match.score() == 100

     def test_match_works_with_special_characters_1(self):
         test_file = self.get_test_loc('detect/specialcharacter/kerberos.txt')
         idx = MiniLicenseIndex([Rule(text_file=test_file, license_expression='kerberos')])
-        assert 1 == len(idx.match(test_file))
+        assert len(idx.match(test_file)) == 1

     def test_match_works_with_special_characters_2(self):
         test_file = self.get_test_loc('detect/specialcharacter/kerberos1.txt')
         idx = MiniLicenseIndex([Rule(text_file=test_file, license_expression='kerberos')])
-        assert 1 == len(idx.match(test_file))
+        assert len(idx.match(test_file)) == 1

     def test_match_works_with_special_characters_3(self):
         test_file = self.get_test_loc('detect/specialcharacter/kerberos2.txt')
         idx = MiniLicenseIndex(
             [Rule(text_file=test_file, license_expression='kerberos')]
         )
-        assert 1 == len(idx.match(test_file))
+        assert len(idx.match(test_file)) == 1

     def test_match_works_with_special_characters_4(self):
         test_file = self.get_test_loc('detect/specialcharacter/kerberos3.txt')
         idx = MiniLicenseIndex([Rule(text_file=test_file, license_expression='kerberos')])
-        assert 1 == len(idx.match(test_file))
+        assert len(idx.match(test_file)) == 1

     def test_overlap_detection1(self):
         # test this containment relationship between test and index licenses:
@@ -357,12 +357,12 @@ def test_overlap_detection1(self):
         querys = 'Redistribution and use bla permitted.'
         # test : license1 is in the index and contains no other rule. should return rule1 at exact coverage.
         matches = idx.match(query_string=querys)
-        assert 1 == len(matches)
+        assert len(matches) == 1
         match = matches[0]
-        assert Span(0, 3) == match.qspan
-        assert rule1 == match.rule
+        assert match.qspan == Span(0, 3)
+        assert match.rule == rule1

         qtext, _itext = get_texts(match)
-        assert 'Redistribution and use [bla] permitted.' == qtext
+        assert qtext == 'Redistribution and use [bla] permitted.'

     def test_overlap_detection2(self):
         # test this containment relationship between test and index licenses:
@@ -386,11 +386,11 @@ def test_overlap_detection2(self):

         querys = 'Redistribution and use bla permitted.'
         matches = idx.match(query_string=querys)
-        assert 1 == len(matches)
+        assert len(matches) == 1
         match = matches[0]
-        assert rule1 == match.rule
+        assert match.rule == rule1

         qtext, _itext = get_texts(match)
-        assert 'Redistribution and use [bla] permitted.' == qtext
+        assert qtext == 'Redistribution and use [bla] permitted.'

     def test_overlap_detection2_exact(self):
         # test this containment relationship between test and index licenses:
@@ -414,11 +414,11 @@ def test_overlap_detection2_exact(self):

         querys = 'Redistribution and use bla permitted.'
         matches = idx.match(query_string=querys)
-        assert 1 == len(matches)
+        assert len(matches) == 1
         match = matches[0]
-        assert rule1 == match.rule
+        assert match.rule == rule1

         qtext, _itext = get_texts(match)
-        assert 'Redistribution and use [bla] permitted.' == qtext
+        assert qtext == 'Redistribution and use [bla] permitted.'

     def test_overlap_detection3(self):
         # test this containment relationship between test and index licenses:
@@ -453,15 +453,15 @@ def test_overlap_detection3(self):
         # test : querys contains license2 that contains license1: return license2 as exact coverage
         matches = idx.match(query_string=querys)
-        assert 1 == len(matches)
+        assert len(matches) == 1
         match = matches[0]
-        assert rule2 == match.rule
+        assert match.rule == rule2

         qtext, _itext = get_texts(match)
         expected = '''
             Redistributions of source must retain copyright.
             Redistribution and use permitted.
             Redistributions in binary form is permitted.'''.split()
-        assert expected == qtext.split()
+        assert qtext.split() == expected

     def test_overlap_detection4(self):
         # test this containment relationship between test and index licenses:
@@ -491,11 +491,11 @@ def test_overlap_detection4(self):

         # test : querys contains license1: return license1 as exact coverage
         matches = idx.match(query_string=querys)
-        assert 1 == len(matches)
+        assert len(matches) == 1
         match = matches[0]
-        assert rule1 == match.rule
+        assert match.rule == rule1

         qtext, _itext = get_texts(match)
-        assert 'Redistribution and use permitted.' == qtext
+        assert qtext == 'Redistribution and use permitted.'

     def test_overlap_detection5(self):
         # test this containment relationship between test and index licenses:
@@ -525,12 +525,12 @@ def test_overlap_detection5(self):

         # test : querys contains license1: return license1 as exact coverage
         matches = idx.match(query_string=querys)
-        assert 1 == len(matches)
+        assert len(matches) == 1
         match = matches[0]
-        assert rule1 == match.rule
+        assert match.rule == rule1

         qtext, _itext = get_texts(match)
-        assert 'Redistribution and use permitted for MIT license.' == qtext
+        assert qtext == 'Redistribution and use permitted for MIT license.'

     def test_fulltext_detection_works_with_partial_overlap_from_location(self):
         test_doc = self.get_test_loc('detect/templates/license3.txt')
@@ -539,19 +539,19 @@ def test_fulltext_detection_works_with_partial_overlap_from_location(self):

         query_loc = self.get_test_loc('detect/templates/license4.txt')
         matches = idx.match(query_loc)
-        assert 1 == len(matches)
+        assert len(matches) == 1
         match = matches[0]
-        assert Span(0, 41) == match.qspan
-        assert Span(0, 41) == match.ispan
-        assert 100 == match.coverage()
-        assert 100 == match.score()
+        assert match.qspan == Span(0, 41)
+        assert match.ispan == Span(0, 41)
+        assert match.coverage() == 100
+        assert match.score() == 100

         qtext, _itext = get_texts(match)
         expected = '''
            is free software; you can redistribute it and/or # modify it under
            the terms of the GNU Lesser General Public # License as published by
            the Free Software Foundation; either # version 2.1 of the License, or
            (at your option) any later version.'''
-        assert ' '.join(expected.split()) == ' '.join(qtext.split())
+        assert ' '.join(qtext.split()) == ' '.join(expected.split())


 class TestIndexPartialMatch(FileBasedTesting):
@@ -584,10 +584,10 @@ def test_match_can_match_with_plain_rule_simple(self):

         query_loc = self.get_test_loc('detect/simple_detection/x11-xconsortium_text.txt')
         matches = idx.match(query_loc)
-        assert 1 == len(matches)
+        assert len(matches) == 1

         match = matches[0]
-        assert Span(0, 213) == match.qspan
+        assert match.qspan == Span(0, 213)

     def test_match_can_match_with_plain_rule_simple2(self):
         rule_text = u'''X11 License
@@ -616,7 +616,7 @@ def test_match_can_match_with_plain_rule_simple2(self):

         query_loc = self.get_test_loc('detect/simple_detection/x11-xconsortium_text.txt')
         matches = idx.match(location=query_loc)
-        assert 1 == len(matches)
+        assert len(matches) == 1

         expected_qtext = u'''
         X11 License
@@ -642,7 +642,7 @@ def test_match_can_match_with_plain_rule_simple2(self):
         '''.split()
         match = matches[0]
         qtext, _itext = get_texts(match)
-        assert expected_qtext == qtext.split()
+        assert qtext.split() == expected_qtext

     def test_match_can_match_with_simple_rule_template2(self):
         rule_text = u'''
@@ -662,7 +662,7 @@ def test_match_can_match_with_simple_rule_template2(self):
         '''

         matches = idx.match(query_string=query_string)
-        assert 1 == len(matches)
+        assert len(matches) == 1

         match = matches[0]
         qtext, itext = get_texts(match)
@@ -672,14 +672,14 @@ def test_match_can_match_with_simple_rule_template2(self):
             OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
             THE SOFTWARE.
         '''.split()
-        assert expected_qtokens == qtext.split()
+        assert qtext.split() == expected_qtokens

         expected_itokens = u'''
             IN NO EVENT SHALL THE BE LIABLE FOR ANY CLAIM DAMAGES OR OTHER LIABILITY
             WHETHER IN AN ACTION OF CONTRACT TORT OR OTHERWISE ARISING FROM OUT OF OR
             IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
         '''.lower().split()
-        assert expected_itokens == itext.split()
+        assert itext.split() == expected_itokens

     def test_match_can_match_discontinuous_rule_text_1(self):
         # in this template text there are only 2 tokens between the two templates markers
@@ -692,9 +692,9 @@ def test_match_can_match_discontinuous_rule_text_1(self):
             reproduce the word for word above copyright notice.'''
         matches = idx.match(query_string=querys)
-        assert 1 == len(matches)
+        assert len(matches) == 1

         match = matches[0]
-        assert 100 == match.coverage()
-        assert 36.67 == match.score()
-        assert Span(0, 9) == match.qspan
-        assert Span(0, 9) == match.ispan
+        assert match.coverage() == 100
+        assert match.score() == 36.67
+        assert match.qspan == Span(0, 9)
+        assert match.ispan == Span(0, 9)
@@ -710,13 +710,13 @@ def test_match_can_match_discontinuous_rule_text_2(self):
             reproduce the stipulated word for word above copyright notice.'''

         matches = idx.match(query_string=querys)
-        assert 1 == len(matches)
+        assert len(matches) == 1

         match = matches[0]
-        assert 100 == match.coverage()
-        assert 41.94 == match.score()
-        assert Span(0, 10) == match.qspan
-        assert Span(0, 10) == match.ispan
+        assert match.coverage() == 100
+        assert match.score() == 41.94
+        assert match.qspan == Span(0, 10)
+        assert match.ispan == Span(0, 10)

     def test_match_can_match_discontinuous_rule_text_3(self):
         # in this template there are 4 tokens between the two templates markers
@@ -729,11 +729,11 @@ def test_match_can_match_discontinuous_rule_text_3(self):
             reproduce as is stipulated the word for word above copyright notice.'''

         matches = idx.match(query_string=querys)
-        assert 1 == len(matches)
+        assert len(matches) == 1

         match = matches[0]
-        assert Span(0, 11) == match.qspan
-        assert Span(0, 11) == match.ispan
+        assert match.qspan == Span(0, 11)
+        assert match.ispan == Span(0, 11)

     def test_match_can_match_with_sax_rule_for_public_domain(self):
         test_text = '''
@@ -759,7 +759,7 @@ def test_match_can_match_with_sax_rule_for_public_domain(self):
         '''

         matches = idx.match(query_string=querys)
-        assert 1 == len(matches)
+        assert len(matches) == 1

         match = matches[0]
         qtext, itext = get_texts(match)
@@ -769,7 +769,7 @@ def test_match_can_match_with_sax_rule_for_public_domain(self):
             source code, compiled code, and documentation contained in this
             distribution into the Public Domain.
         '''.split())
-        assert expected_qtext == ' '.join(qtext.split())
+        assert ' '.join(qtext.split()) == expected_qtext

         expected_itext = ' '.join(u'''
             I hereby abandon any property rights to
@@ -777,12 +777,12 @@ def test_match_can_match_with_sax_rule_for_public_domain(self):
             source code compiled code and documentation contained in this
             distribution into the Public Domain
         '''.lower().split())
-        assert expected_itext == ' '.join(itext.split())
+        assert ' '.join(itext.split()) == expected_itext

-        assert 84 == match.coverage()
-        assert 84 == match.score()
-        assert Span(0, 6) | Span(13, 26) == match.qspan
-        assert Span(0, 6) | Span(11, 24) == match.ispan
+        assert match.coverage() == 84
+        assert match.score() == 84
+        assert match.qspan == Span(0, 6) | Span(13, 26)
+        assert match.ispan == Span(0, 6) | Span(11, 24)

     def test_match_can_match_with_rule_template_with_gap_near_start_with_few_tokens_before(self):
         # failed when a gapped token starts at a beginning of rule with few tokens before
@@ -797,7 +797,7 @@ def test_match_can_match_with_rule_template_with_gap_near_start_with_few_tokens_

         qloc = self.get_test_loc('detect/templates/license8.txt')
         matches = idx.match(qloc)
-        assert 1 == len(matches)
+        assert len(matches) == 1
         match = matches[0]

         expected_qtokens = u"""
@@ -878,17 +878,17 @@ def test_match_can_match_with_rule_template_with_gap_near_start_with_few_tokens_
         '''.lower().split()

         qtext, itext = get_texts(match)
-        assert expected_qtokens == qtext.split()
-        assert expected_itokens == itext.split()
+        assert qtext.split() == expected_qtokens
+        assert itext.split() == expected_itokens

-        assert 97.52 == match.coverage()
-        assert 97.52 == match.score()
+        assert match.coverage() == 97.52
+        assert match.score() == 97.52

         expected = Span(2, 97) | Span(99, 124) | Span(126, 129) | Span(131, 137) | Span(147, 175) | Span(177, 250)
-        assert expected == match.qspan
+        assert match.qspan == expected

         expected = Span(1, 133) | Span(139, 241)
-        assert expected == match.ispan
+        assert match.ispan == expected

     def test_match_can_match_with_index_built_from_rule_directory_with_sun_bcls(self):
         rule_dir = self.get_test_loc('detect/rule_template/rules')
@@ -897,11 +897,11 @@ def test_match_can_match_with_index_built_from_rule_directory_with_sun_bcls(self
         # at line 151 the query has an extra "Software" word inserted to avoid hash matching
         query_loc = self.get_test_loc('detect/rule_template/query.txt')
         matches = idx.match(location=query_loc)
-        assert 1 == len(matches)
+        assert len(matches) == 1
         match = matches[0]
         expected = Span(0, 941) | Span(943, 1723)
-        assert expected == match.qspan
-        assert match_seq.MATCH_SEQ == match.matcher
+        assert match.qspan == expected
+        assert match.matcher == match_seq.MATCH_SEQ


 class TestMatchAccuracyWithFullIndex(FileBasedTesting):
@@ -920,7 +920,7 @@ def check_position(self, test_path, expected, with_span=True):
         for match in matches:
             for detected in match.rule.license_keys():
                 results.append((detected, match.lines(), with_span and match.qspan or None))
-        assert expected == results
+        assert results == expected

     def test_match_has_correct_positions_basic(self):
         idx = cache.get_index()
@@ -934,7 +934,7 @@ def test_match_has_correct_positions_basic(self):
         m1 = LicenseMatch(rule=rule, qspan=Span(0, 7), ispan=Span(0, 7), start_line=1, end_line=1)
         m2 = LicenseMatch(rule=rule, qspan=Span(8, 15), ispan=Span(0, 7), start_line=2, end_line=2)
         m3 = LicenseMatch(rule=rule, qspan=Span(16, 23), ispan=Span(0, 7), start_line=3, end_line=3)
-        assert [m1, m2, m3] == matches
+        assert matches == [m1, m2, m3]

     def test_match_has_correct_line_positions_for_query_with_repeats(self):
         expected = [
@@ -955,11 +955,11 @@ def test_match_has_correct_line_positions_for_query_with_repeats(self):

             qtext, _itext = get_texts(match)
             try:
-                assert ex_lics == match.rule.license_keys()
-                assert ex_lines == match.lines()
-                assert ex_qtext == qtext
+                assert match.rule.license_keys() == ex_lics
+                assert match.lines() == ex_lines
+                assert qtext == ex_qtext
             except AssertionError:
-                assert expected[i] == (match.rule.license_keys(), match.lines(), qtext)
+                assert (match.rule.license_keys(), match.lines(), qtext) == expected[i]

     def test_match_does_not_return_spurious_match(self):
         expected = []
@@ -987,7 +987,7 @@ def test_match_returns_correct_lines(self):
         for match in matches:
             for detected in match.rule.license_keys():
                 results.append((detected, match.lines()))
-        assert expected == results
+        assert results == expected

     def test_match_returns_correct_lines2(self):
         test_location = self.get_test_loc('positions/correct_lines2')
@@ -998,7 +998,7 @@ def test_match_returns_correct_lines2(self):
         for match in matches:
             for detected in match.rule.license_keys():
                 results.append((detected, match.lines()))
-        assert expected == results
+        assert results == expected

     def test_match_works_for_apache_rule(self):
         idx = cache.get_index()
@@ -1009,18 +1009,18 @@ def test_match_works_for_apache_rule(self):
         '''

         matches = idx.match(query_string=querys)
-        assert 1 == len(matches)
+        assert len(matches) == 1

         match = matches[0]
-        assert 'apache-2.0_212.RULE' == match.rule.identifier
-        assert match_aho.MATCH_AHO_EXACT == match.matcher
+        assert match.rule.identifier == 'apache-2.0_212.RULE'
+        assert match.matcher == match_aho.MATCH_AHO_EXACT

         qtext, _itext = get_texts(match)
         expected = (
             'license. The Apache Software License, Version 2.0\n'
             'http://www.apache.org/licenses/LICENSE-2.0.txt'
         )
-        assert expected == qtext
-        assert (1, 4) == match.lines()
+        assert qtext == expected
+        assert match.lines() == (1, 4)

     def test_match_does_not_detect_spurrious_short_apache_rule(self):
         idx = cache.get_index()
@@ -1029,7 +1029,7 @@ def test_match_does_not_detect_spurrious_short_apache_rule(self):
             Apache log4j 1.2 - Continuous Integration
         '''
         matches = idx.match(query_string=querys)
-        assert [] == matches
+        assert matches == []

     def test_match_does_not_match_false_positive_regions_properly(self):
         # note: this test relies on the false positive rule:
@@ -1050,7 +1050,7 @@ def test_match_does_not_match_false_positive_regions_properly(self):

         results = [match.matched_text() for match in matches]
         expected = ['licensed under the LGPL license', 'license: dual BSD/GPL']
-        assert expected == results
+        assert results == expected

     def test_match_has_correct_line_positions_in_automake_perl_file(self):
         # reported as https://github.com/nexB/scancode-toolkit/issues/88
@@ -1070,7 +1070,7 @@ def test_score_is_not_100_for_exact_match_with_extra_words(self):
         idx = cache.get_index()
         test_loc = self.get_test_loc('detect/score/test.txt')
         matches = idx.match(location=test_loc)
-        assert 1 == len(matches)
+        assert len(matches) == 1
         match = matches[0]
         assert 99 < match.score() < 100

@@ -1078,7 +1078,7 @@ def test_match_texts_with_short_lgpl_and_gpl_notices(self):
         idx = cache.get_index()
         test_loc = self.get_test_loc('detect/short_l_and_gpls')
         matches = idx.match(location=test_loc)
-        assert 8 == len(matches)
+        assert len(matches) == 8
         results = [m.matched_text(whole_lines=False, _usecache=False) for m in matches]
         expected = [
             'This software is distributed under the following licenses:',
@@ -1089,7 +1089,7 @@ def test_match_texts_with_short_lgpl_and_gpl_notices(self):
             'GNU Lesser General Public (LGPL)',
             'GNU Lesser General Public (LGPL)',
             'GNU Lesser General Public (LGPL)']
-        assert expected == results
+        assert results == expected


 class TestMatchBinariesWithFullIndex(FileBasedTesting):
@@ -1099,42 +1099,42 @@ def test_match_in_binary_lkms_1(self):
         idx = cache.get_index()
         qloc = self.get_test_loc('positions/ath_pci.ko')
         matches = idx.match(location=qloc)
-        assert 1 == len(matches)
+        assert len(matches) == 1
         match = matches[0]
-        assert ['bsd-new', 'gpl-2.0'] == match.rule.license_keys()
+        assert match.rule.license_keys() == ['bsd-new', 'gpl-2.0']

         qtext, itext = get_texts(match)
-        assert 'license=Dual BSD/GPL' == qtext
-        assert 'license dual bsd gpl' == itext
+        assert qtext == 'license=Dual BSD/GPL'
+        assert itext == 'license dual bsd gpl'

     @pytest.mark.scanslow
     def test_match_in_binary_lkms_2(self):
         idx = cache.get_index()
         qloc = self.get_test_loc('positions/eeepc_acpi.ko')
         matches = idx.match(location=qloc)
-        assert 1 == len(matches)
+        assert len(matches) == 1
         match = matches[0]
-        assert ['gpl-1.0-plus'] == match.rule.license_keys()
+        assert match.rule.license_keys() == ['gpl-1.0-plus']
         assert match.ispan == Span(0, 1)

         qtext, itext = get_texts(match)
-        assert 'license=GPL' == qtext
-        assert 'license gpl' == itext
+        assert qtext == 'license=GPL'
+        assert itext == 'license gpl'

     @pytest.mark.scanslow
     def test_match_in_binary_lkms_3(self):
         idx = cache.get_index()
         qloc = self.get_test_loc('positions/wlan_xauth.ko')
         matches = idx.match(location=qloc)
-        assert 1 == len(matches)
+        assert len(matches) == 1
         match = matches[0]
-        assert ['bsd-new', 'gpl-2.0'] == match.rule.license_keys()
-        assert 100 == match.coverage()
-        assert 100 == match.score()
+        assert match.rule.license_keys() == ['bsd-new', 'gpl-2.0']
+        assert match.coverage() == 100
+        assert match.score() == 100
         qtext, itext = get_texts(match)
-        assert 'license=Dual BSD/GPL' == qtext
-        assert 'license dual bsd gpl' == itext
-        assert Span(0, 3) == match.ispan
+        assert qtext == 'license=Dual BSD/GPL'
+        assert itext == 'license dual bsd gpl'
+        assert match.ispan == Span(0, 3)


 class TestRegression(FileBasedTesting):
@@ -1144,24 +1144,24 @@ def test_detection_does_not_munge_first_matched_word(self):
         idx = cache.get_index()
         qloc = self.get_test_loc('detect/truncated/seq-match-truncated.bug')
         matches = idx.match(location=qloc)
-        assert 2 == len(matches)
+        assert len(matches) == 2
         match = matches[1]
         matched_text = match.matched_text(whole_lines=False, _usecache=False)
         first_word = matched_text.split()[0]
-        assert 'Permission' == first_word
+        assert first_word == 'Permission'

     def test_detection_does_merge_contained_matches_separated_by_false_positive(self):
         idx = cache.get_index()
         qloc = self.get_test_loc('detect/contained/moz')
         matches = idx.match(location=qloc)
-        assert 1 == len(matches)
+        assert len(matches) == 1
         match = matches[0]
         matched_text = match.matched_text(whole_lines=False, _usecache=False)
         words = matched_text.split()
         first_words = words[0: 3]
-        assert ['BEGIN', 'LICENSE', 'BLOCK'] == first_words
+        assert first_words == ['BEGIN', 'LICENSE', 'BLOCK']
         last_words = words[-4:-1]
-        assert ['END', 'LICENSE', 'BLOCK'] == last_words
+        assert last_words == ['END', 'LICENSE', 'BLOCK']

     def test_detection_return_correct_lgpl_with_correct_text_using_full_index(self):
         idx = cache.get_index()
@@ -1185,8 +1185,8 @@ def test_detection_return_correct_lgpl_with_correct_text_using_full_index(self):
         )
         match = matches[0]
         rule = match.rule
-        assert expected == results
-        assert 'gpl-2.0-plus' == rule.license_expression
+        assert results == expected
+        assert rule.license_expression == 'gpl-2.0-plus'

     def test_detection_return_correct_lgpl_with_correct_text_using_controlled_index(self):
         from licensedcode import models
@@ -1212,7 +1212,7 @@ def test_detection_return_correct_lgpl_with_correct_text_using_controlled_index(
             ' * along with [testVMX]; if not, write to the Free Software\n'
             ' * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307'
         )
-        assert expected == results
+        assert results == expected
         matched_rule = matches[0].rule
-        assert 'gpl-2.0-plus_258.RULE' == matched_rule.identifier
-        assert 'gpl-2.0-plus' == matched_rule.license_expression
+        assert matched_rule.identifier == 'gpl-2.0-plus_258.RULE'
+        assert matched_rule.license_expression == 'gpl-2.0-plus'
diff --git a/tests/licensedcode/test_detection_validate.py b/tests/licensedcode/test_detection_validate.py
index 986a229f8bf..94e747176b4 100644
--- a/tests/licensedcode/test_detection_validate.py
+++ b/tests/licensedcode/test_detection_validate.py
@@ -80,7 +80,7 @@ def check_rule_or_license_can_be_self_detected_exactly(rule):
     results = flatten((m.rule.identifier, str(int(m.coverage()))) for m in matches)

     try:
-        assert expected == results
+        assert results == expected
     except:

         from licensedcode.tracing import get_texts
@@ -115,7 +115,7 @@ def check_rule_or_license_can_be_self_detected_exactly(rule):
         ])

         # this assert will always fail and provide a detailed failure trace
-        assert '\n'.join(expected) == '\n'.join(failure_trace)
+        assert '\n'.join(failure_trace) == '\n'.join(expected)


 def check_ignorable_clues(rule, regen=False):
@@ -165,7 +165,7 @@ def check_ignorable_clues(rule, regen=False):
     expected = dict([(k, v) for k, v in sorted(expected.items()) if v])

     try:
-        assert expected == results
+        assert results == expected
     except:
         # On failure, we compare again to get additional failure details such as
         # a clickable text_file path
@@ -178,7 +178,7 @@ def check_ignorable_clues(rule, regen=False):
             'file://{text_file}'.format(**locals()),
         ]
         # this assert will always fail and provide a more detailed failure trace
-        assert saneyaml.dump(expected) == saneyaml.dump(results)
+        assert saneyaml.dump(results) == saneyaml.dump(expected)


 def build_validation_tests(rules, class_basic, class_extended):
diff --git a/tests/licensedcode/test_index.py b/tests/licensedcode/test_index.py
index 61acb42667e..8248e116b3c 100644
--- a/tests/licensedcode/test_index.py
+++ b/tests/licensedcode/test_index.py
@@ -60,12 +60,12 @@ def test_index_structures(self):
         rules = [models.Rule(stored_text=t[0]) for t in test_rules]
         idx._add_rules(rules, _legalese=mini_legalese,)

-        assert 40 == idx.len_legalese
+        assert idx.len_legalese == 40

         expected_lengths = [r[1] for r in test_rules]
         results = [
             (rule.length_unique, rule.high_length_unique, rule.length, rule.high_length)
             for rule in rules]
-        assert expected_lengths == results
+        assert results == expected_lengths

         expected = set([
             'bsd',
@@ -85,7 +85,7 @@ def test_index_structures(self):

         xdict = {key for key, val in idx.dictionary.items()
                  if val >= idx.len_legalese}
-        assert expected == xdict
+        assert xdict == expected

         xtbi = sorted([
             'one',
@@ -103,7 +103,7 @@ def test_index_structures(self):
             'bsd',
             'lgpl'])

-        assert xtbi == sorted([t for i, t in enumerate(idx.tokens_by_tid) if i >= idx.len_legalese])
+        assert sorted([t for i, t in enumerate(idx.tokens_by_tid) if i >= idx.len_legalese]) == xtbi

     def test_index_structures_with__add_rules(self):
         base = self.get_test_loc('index/tokens_count')
@@ -116,7 +116,7 @@ def test_index_structures_with__add_rules(self):

         idx._add_rules(rules, _legalese=mini_legalese)

-        assert 40 == idx.len_legalese
+        assert idx.len_legalese == 40

         expected = set([
             'all',
@@ -131,7 +131,7 @@ def test_index_structures_with__add_rules(self):

         xdict = {key for key, val in idx.dictionary.items()
                  if val >= idx.len_legalese}
-        assert expected == xdict
+        assert xdict == expected

         xtbi = sorted([
             'all',
@@ -145,7 +145,7 @@ def test_index_structures_with__add_rules(self):
             'yes'
         ])

-        assert xtbi == sorted([t for i, t in enumerate(idx.tokens_by_tid) if i >= idx.len_legalese])
+        assert sorted([t for i, t in enumerate(idx.tokens_by_tid) if i >= idx.len_legalese]) == xtbi

         expected_msets_by_rid = [
             {u'redistribution': 1},
@@ -175,7 +175,7 @@ def test_index_structures_with__add_rules(self):

         htmset = [{idx.tokens_by_tid[tok]: freq for (tok, freq) in tids_mset.items()}
                   for tids_mset in idx.msets_by_rid]
-        assert expected_msets_by_rid == htmset
+        assert htmset == expected_msets_by_rid

     def test_index_fails_on_duplicated_rules(self):
         rule_dir = self.get_test_loc('index/no_duplicated_rule')
@@ -205,14 +205,14 @@ def test_match_exact_from_string_once(self):
             Always'''
         result = idx.match(query_string=querys)
-        assert 1 == len(result)
+        assert len(result) == 1
         match = result[0]

         qtext, itext = get_texts(match)
-        assert 'Redistribution and use in source and binary forms, with or without modification,\nare permitted.' == qtext
-        assert 'redistribution and use in source and binary forms with or without modification\nare permitted' == itext
+        assert qtext == 'Redistribution and use in source and binary forms, with or without modification,\nare permitted.'
+        assert itext == 'redistribution and use in source and binary forms with or without modification\nare permitted'

-        assert Span(0, 13) == match.qspan
-        assert Span(0, 13) == match.ispan
+        assert match.qspan == Span(0, 13)
+        assert match.ispan == Span(0, 13)

     def test_match_exact_from_string_twice_with_repeated_text(self):
         _stored_text = u'licensed under the GPL, licensed under the GPL'
@@ -225,25 +225,25 @@ def test_match_exact_from_string_twice_with_repeated_text(self):
         # 0 1 2 3 4 5 6 7 8 9

         result = idx.match(query_string=querys)
-        assert 1 == len(result)
+        assert len(result) == 1
         match = result[0]

         qtext, itext = get_texts(match)
-        assert 'licensed under the GPL, licensed under the GPL' == qtext
-        assert 'licensed under the gpl licensed under the gpl' == itext
+        assert qtext == 'licensed under the GPL, licensed under the GPL'
+        assert itext == 'licensed under the gpl licensed under the gpl'

-        assert Span(0, 7) == match.qspan
-        assert Span(0, 7) == match.ispan
+        assert match.qspan == Span(0, 7)
+        assert match.ispan == Span(0, 7)

         # match again to ensure that there are no state side effects
         result = idx.match(query_string=querys)
-        assert 1 == len(result)
+        assert len(result) == 1
         match = result[0]
-        assert Span(0, 7) == match.qspan
-        assert Span(0, 7) == match.ispan
+        assert match.qspan == Span(0, 7)
+        assert match.ispan == Span(0, 7)

         qtext, itext = get_texts(match)
-        assert u'licensed under the GPL, licensed under the GPL' == qtext
-        assert u'licensed under the gpl licensed under the gpl' == itext
+        assert qtext == u'licensed under the GPL, licensed under the GPL'
+        assert itext == u'licensed under the gpl licensed under the gpl'

     def test_match_exact_with_junk_in_between_good_tokens(self):
         _stored_text = u'licensed under the GPL, licensed under the GPL'
@@ -254,26 +254,26 @@ def test_match_exact_with_junk_in_between_good_tokens(self):
         querys = u'Hi licensed that under is the that GPL, licensed or under not the GPL by yes.'

         result = idx.match(query_string=querys)
-        assert 1 == len(result)
+        assert len(result) == 1
         match = result[0]
         qtext, itext = get_texts(match)
-        assert u'licensed [that] under [is] the [that] GPL, licensed [or] under [not] the GPL' == qtext
-        assert u'licensed under the gpl licensed under the gpl' == itext
+        assert qtext == u'licensed [that] under [is] the [that] GPL, licensed [or] under [not] the GPL'
+        assert itext == u'licensed under the gpl licensed under the gpl'

     def test_match_exact_from_file(self):
         idx = MiniLicenseIndex(self.get_test_rules('index/mini'))
         query_loc = self.get_test_loc('index/queryperfect-mini')

         result = idx.match(location=query_loc)
-        assert 1 == len(result)
+        assert len(result) == 1
         match = result[0]

         qtext, itext = get_texts(match)
-        assert 'Redistribution and use in source and binary forms, with or without modification,\nare permitted.' == qtext
-        assert 'redistribution and use in source and binary forms with or without modification\nare permitted' == itext
+        assert qtext == 'Redistribution and use in source and binary forms, with or without modification,\nare permitted.'
+        assert itext == 'redistribution and use in source and binary forms with or without modification\nare permitted'

-        assert Span(0, 13) == match.qspan
-        assert Span(0, 13) == match.ispan
+        assert match.qspan == Span(0, 13)
+        assert match.ispan == Span(0, 13)

     def test_match_multiple(self):
         test_rules = self.get_test_rules('index/bsd')
@@ -281,10 +281,10 @@ def test_match_multiple(self):
         query = self.get_test_loc('index/querysimple')

         result = idx.match(location=query)
-        assert 1 == len(result)
+        assert len(result) == 1
         match = result[0]
-        assert Span(0, 211) == match.qspan
-        assert Span(0, 211) == match.ispan
+        assert match.qspan == Span(0, 211)
+        assert match.ispan == Span(0, 211)

     def test_match_return_correct_offsets(self):
         # notes: A is a stopword. This and that are not
@@ -298,14 +298,14 @@ def test_match_return_correct_offsets(self):
         # 0 1 2 3 4 5 6 7

         result = idx.match(query_string=querys)
-        assert 1 == len(result)
+        assert len(result) == 1
         match = result[0]

         qtext, itext = get_texts(match)
-        assert 'this GPL. A MIT. that LGPL.' == qtext
-        assert 'this gpl mit that lgpl' == itext
+        assert qtext == 'this GPL. A MIT. that LGPL.'
+ assert itext == 'this gpl mit that lgpl' - assert Span(0, 4) == match.qspan - assert Span(0, 4) == match.ispan + assert match.qspan == Span(0, 4) + assert match.ispan == Span(0, 4) class TestMatchWithTemplates(IndexTesting): @@ -349,9 +349,9 @@ def test_match_with_template_and_multiple_rules(self): No part of match ''' result = idx.match(query_string=querys) print('here3') - assert 1 == len(result) + assert len(result) == 1 match = result[0] - assert match_seq.MATCH_SEQ == match.matcher + assert match.matcher == match_seq.MATCH_SEQ exp_qtext = u""" Redistribution and use in source and binary forms, with or without modification, @@ -410,13 +410,13 @@ def test_match_with_template_and_multiple_rules(self): """.lower().split() qtext, itext = get_texts(match) - assert exp_qtext == qtext.split() - assert exp_itext == itext.split() + assert qtext.split() == exp_qtext + assert itext.split() == exp_itext - assert (Span(1, 72) | Span(74, 211)) == match.qspan + assert match.qspan == (Span(1, 72) | Span(74, 211)) - assert Span(0, 209) == match.ispan - assert 100 == match.coverage() + assert match.ispan == Span(0, 209) + assert match.coverage() == 100 def test_match_to_indexed_template_with_few_tokens_around_gaps(self): # Was failing when a gap in a template starts very close to the start of @@ -432,7 +432,7 @@ def test_match_to_indexed_template_with_few_tokens_around_gaps(self): query_loc = self.get_test_loc('index/templates/query.txt') result = idx.match(location=query_loc) - assert 1 == len(result) + assert len(result) == 1 match = result[0] exp_qtext = u""" @@ -517,10 +517,10 @@ def test_match_to_indexed_template_with_few_tokens_around_gaps(self): ADVISED OF THE DAMAGE """.lower().split() qtext, itext = get_texts(match) - assert exp_qtext == qtext.split() - assert exp_itext == itext.split() + assert qtext.split() == exp_qtext + assert itext.split() == exp_itext assert match.coverage() > 97 - assert match_seq.MATCH_SEQ == match.matcher + assert match.matcher == match_seq.MATCH_SEQ def test_match_with_templates_with_redundant_tokens_yield_single_exact_match(self): _stored_text = u'copyright reserved mit is license, {{}} copyright reserved mit is license' @@ -538,18 +538,18 @@ def test_match_with_templates_with_redundant_tokens_yield_single_exact_match(sel expected = [None, None, u'copyright', u'reserved', u'mit', u'is', u'license', u'is', None, u'copyright', u'reserved', u'mit', u'is', u'license', None] # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 - assert expected == tks_as_str(qry.tokens_with_unknowns()) + assert tks_as_str(qry.tokens_with_unknowns()) == expected result = idx.match(query_string=querys) - assert 1 == len(result) + assert len(result) == 1 match = result[0] - assert Span(0, 4) | Span(6, 10) == match.qspan - assert Span(0, 9) == match.ispan - assert 100 == match.coverage() + assert match.qspan == Span(0, 4) | Span(6, 10) + assert match.ispan == Span(0, 9) + assert match.coverage() == 100 qtext, itext = get_texts(match) - assert 'copyright reserved mit is license [is] [the] copyright reserved mit is license' == qtext - assert 'copyright reserved mit is license copyright reserved mit is license' == itext + assert qtext == 'copyright reserved mit is license [is] [the] copyright reserved mit is license' + assert itext == 'copyright reserved mit is license copyright reserved mit is license' class TestIndexDumpLoad(IndexTesting): @@ -565,7 +565,7 @@ def test_dumps_loads_default(self): u'copyright', u'following', u'forms', u'holder', u'in', u'is', u'met', u'permitted', u'provided', 
u'redistribution', u'software', u'source', u'that', u'the', u'this', u'use'] - assert expected == sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) + assert sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) == expected def test_dump_load_default(self): test_rules = self.get_test_rules('index/dump_load') @@ -580,11 +580,11 @@ def test_dump_load_default(self): u'copyright', u'following', u'forms', u'holder', u'in', u'is', u'met', u'permitted', u'provided', u'redistribution', u'software', u'source', u'that', u'the', u'this', u'use'] - assert expected == sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) + assert sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) == expected with open(test_dump, 'rb') as td: idx3 = index.LicenseIndex.loads(td.read()) - assert expected == sorted([k for k, v in idx3.dictionary.items() if v >= idx3.len_legalese]) + assert sorted([k for k, v in idx3.dictionary.items() if v >= idx3.len_legalese]) == expected def test_dumps_fast_loads_fast(self): test_rules = self.get_test_rules('index/dump_load') @@ -596,7 +596,7 @@ def test_dumps_fast_loads_fast(self): u'copyright', u'following', u'forms', u'holder', u'in', u'is', u'met', u'permitted', u'provided', u'redistribution', u'software', u'source', u'that', u'the', u'this', u'use'] - assert expected == sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) + assert sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) == expected def test_dumps_slow_loads_slow(self): test_rules = self.get_test_rules('index/dump_load') @@ -608,7 +608,7 @@ def test_dumps_slow_loads_slow(self): u'copyright', u'following', u'forms', u'holder', u'in', u'is', u'met', u'permitted', u'provided', u'redistribution', u'software', u'source', u'that', u'the', u'this', u'use'] - assert expected == sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) + assert sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) == expected def test_dumps_fast_loads_slow(self): test_rules = self.get_test_rules('index/dump_load') @@ -620,7 +620,7 @@ def test_dumps_fast_loads_slow(self): u'copyright', u'following', u'forms', u'holder', u'in', u'is', u'met', u'permitted', u'provided', u'redistribution', u'software', u'source', u'that', u'the', u'this', u'use'] - assert expected == sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) + assert sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) == expected def test_dumps_slow_loads_fast(self): test_rules = self.get_test_rules('index/dump_load') @@ -632,7 +632,7 @@ def test_dumps_slow_loads_fast(self): u'copyright', u'following', u'forms', u'holder', u'in', u'is', u'met', u'permitted', u'provided', u'redistribution', u'software', u'source', u'that', u'the', u'this', u'use'] - assert expected == sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) + assert sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) == expected def test_dump_fast_load_fast(self): test_rules = self.get_test_rules('index/dump_load') @@ -647,7 +647,7 @@ def test_dump_fast_load_fast(self): u'copyright', u'following', u'forms', u'holder', u'in', u'is', u'met', u'permitted', u'provided', u'redistribution', u'software', u'source', u'that', u'the', u'this', u'use'] - assert expected == sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) + assert sorted([k for k, v in 
idx2.dictionary.items() if v >= idx2.len_legalese]) == expected def test_dump_fast_load_slow(self): test_rules = self.get_test_rules('index/dump_load') @@ -662,7 +662,7 @@ def test_dump_fast_load_slow(self): u'copyright', u'following', u'forms', u'holder', u'in', u'is', u'met', u'permitted', u'provided', u'redistribution', u'software', u'source', u'that', u'the', u'this', u'use'] - assert expected == sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) + assert sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) == expected def test_dump_slow_load_slow(self): test_rules = self.get_test_rules('index/dump_load') @@ -677,7 +677,7 @@ def test_dump_slow_load_slow(self): u'copyright', u'following', u'forms', u'holder', u'in', u'is', u'met', u'permitted', u'provided', u'redistribution', u'software', u'source', u'that', u'the', u'this', u'use'] - assert expected == sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) + assert sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) == expected def test_dump_slow_load_fast(self): test_rules = self.get_test_rules('index/dump_load') @@ -692,4 +692,4 @@ def test_dump_slow_load_fast(self): u'copyright', u'following', u'forms', u'holder', u'in', u'is', u'met', u'permitted', u'provided', u'redistribution', u'software', u'source', u'that', u'the', u'this', u'use'] - assert expected == sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) + assert sorted([k for k, v in idx2.dictionary.items() if v >= idx2.len_legalese]) == expected diff --git a/tests/licensedcode/test_legal.py b/tests/licensedcode/test_legal.py index b42b3bc991f..9c655e03aa9 100644 --- a/tests/licensedcode/test_legal.py +++ b/tests/licensedcode/test_legal.py @@ -20,34 +20,34 @@ class TestSpecialFiles(FileBasedTesting): def test_is_special_legal_file_COPYING(self): test_loc = self.get_test_loc('legal/COPYING') expected = 'yes' - assert expected == legal.is_special_legal_file(test_loc) + assert legal.is_special_legal_file(test_loc) == expected def test_is_special_legal_file_Copyrights(self): test_loc = self.get_test_loc('legal/Copyrights') expected = 'yes' - assert expected == legal.is_special_legal_file(test_loc) + assert legal.is_special_legal_file(test_loc) == expected def test_is_special_legal_file_LICENSE(self): test_loc = self.get_test_loc('legal/LICENSE') expected = 'yes' - assert expected == legal.is_special_legal_file(test_loc) + assert legal.is_special_legal_file(test_loc) == expected def test_is_special_legal_file_Notice(self): test_loc = self.get_test_loc('legal/Notice') expected = 'yes' - assert expected == legal.is_special_legal_file(test_loc) + assert legal.is_special_legal_file(test_loc) == expected def test_is_special_legal_file_no_license_in_here_java(self): test_loc = self.get_test_loc('legal/no_license_in_here.java') expected = 'maybe' - assert expected == legal.is_special_legal_file(test_loc) + assert legal.is_special_legal_file(test_loc) == expected def test_is_special_legal_file_noticE_html(self): test_loc = self.get_test_loc('legal/noticE.html') expected = 'yes' - assert expected == legal.is_special_legal_file(test_loc) + assert legal.is_special_legal_file(test_loc) == expected def test_is_special_legal_file_useless_notice_txt(self): test_loc = self.get_test_loc('legal/useless_notice.txt') expected = 'maybe' - assert expected == legal.is_special_legal_file(test_loc) + assert legal.is_special_legal_file(test_loc) == expected diff --git a/tests/licensedcode/test_match.py 
b/tests/licensedcode/test_match.py index e1b46239e6d..c8d763859e4 100644 --- a/tests/licensedcode/test_match.py +++ b/tests/licensedcode/test_match.py @@ -156,8 +156,8 @@ def test_combine_matches_with_same_rules(self): m2 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6)) match = m1.combine(m2) - assert Span(0, 6) == match.qspan - assert Span(0, 6) == match.ispan + assert match.qspan == Span(0, 6) + assert match.ispan == Span(0, 6) def test_combine_matches_cannot_combine_matches_with_same_licensing_and_different_rules(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -263,7 +263,7 @@ def test_merge_does_merge_non_contiguous_matches_in_sequence(self): m5 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6)) results = merge_matches([m1, m2, m5]) - assert [LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6))] == results + assert results == [LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6))] def test_merge_does_not_merge_overlapping_matches_of_different_rules_with_different_licensing(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -272,7 +272,7 @@ def test_merge_does_not_merge_overlapping_matches_of_different_rules_with_differ m1 = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5)) m2 = LicenseMatch(rule=r2, qspan=Span(1, 6), ispan=Span(1, 6)) - assert [m1, m2] == merge_matches([m1, m2]) + assert merge_matches([m1, m2]) == [m1, m2] def test_merge_does_merge_overlapping_matches_of_same_rules_if_in_sequence(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -280,7 +280,7 @@ def test_merge_does_merge_overlapping_matches_of_same_rules_if_in_sequence(self) m1 = LicenseMatch(rule=r1, qspan=Span(0, 5), ispan=Span(0, 5)) m2 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6)) - assert [LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6))] == merge_matches([m1, m2]) + assert merge_matches([m1, m2]) == [LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6))] def test_merge_does_not_merge_overlapping_matches_of_same_rules_if_in_sequence_with_gaps(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -291,7 +291,7 @@ def test_merge_does_not_merge_overlapping_matches_of_same_rules_if_in_sequence_w expected = [LicenseMatch(rule=r1, qspan=Span(1, 3) | Span(14, 20), ispan=Span(1, 10))] results = merge_matches([m1, m2]) - assert expected == results + assert results == expected def test_merge_does_not_merge_overlapping_matches_of_same_rules_if_in_sequence_with_gaps_for_long_match(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -301,7 +301,7 @@ def test_merge_does_not_merge_overlapping_matches_of_same_rules_if_in_sequence_w expected = [LicenseMatch(rule=r1, qspan=Span(1, 10) | Span(14, 20), ispan=Span(1, 10) | Span(14, 20))] results = merge_matches([m1, m2]) - assert expected == results + assert results == expected def test_merge_does_not_merge_overlapping_matches_of_same_rules_if_in_not_sequence(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -310,7 +310,7 @@ def test_merge_does_not_merge_overlapping_matches_of_same_rules_if_in_not_sequen m2 = LicenseMatch(rule=r1, qspan=Span(14, 20), ispan=Span(1, 3)) matches = merge_matches([m1, m2]) - assert sorted([m1, m2]) == sorted(matches) + assert sorted(matches) == sorted([m1, m2]) def test_merge_does_not_merge_contained_matches_of_different_rules_with_same_licensing(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -320,7 +320,7 @@ def 
test_merge_does_not_merge_contained_matches_of_different_rules_with_same_lic m2 = LicenseMatch(rule=r2, qspan=Span(1, 6), ispan=Span(1, 6)) matches = merge_matches([m1, m2]) - assert sorted([m1, m2]) == sorted(matches) + assert sorted(matches) == sorted([m1, m2]) def test_files_does_filter_contained_matches_of_different_rules_with_same_licensing(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -330,8 +330,8 @@ def test_files_does_filter_contained_matches_of_different_rules_with_same_licens m2 = LicenseMatch(rule=r2, qspan=Span(1, 6), ispan=Span(1, 6)) matches, discarded = filter_contained_matches([m1, m2]) - assert [m2] == matches - assert [m1] == discarded + assert matches == [m2] + assert discarded == [m1] def test_merge_does_not_merge_overlaping_matches_with_same_licensings(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -346,7 +346,7 @@ def test_merge_does_not_merge_overlaping_matches_with_same_licensings(self): LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6)), LicenseMatch(rule=r2, qspan=Span(1, 6), ispan=Span(1, 6)), ] - assert sorted(expected) == sorted(result) + assert sorted(result) == sorted(expected) def test_filter_contained_matches_only_filter_contained_matches_with_same_licensings(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -357,7 +357,7 @@ def test_filter_contained_matches_only_filter_contained_matches_with_same_licens same_span2 = LicenseMatch(rule=r2, qspan=Span(1, 6), ispan=Span(1, 6)) matches, discarded = filter_contained_matches([overlap, same_span1, same_span2]) - assert [overlap, same_span1] == matches + assert matches == [overlap, same_span1] assert discarded def test_filter_overlaping_matches_does_filter_overlaping_matches_with_same_licensings(self): @@ -369,7 +369,7 @@ def test_filter_overlaping_matches_does_filter_overlaping_matches_with_same_lice same_span2 = LicenseMatch(rule=r2, qspan=Span(1, 6), ispan=Span(1, 6)) matches, discarded = filter_overlapping_matches([overlap, same_span1, same_span2]) - assert [overlap] == matches + assert matches == [overlap] assert discarded def test_filter_contained_matches_prefers_longer_overlaping_matches(self): @@ -381,7 +381,7 @@ def test_filter_contained_matches_prefers_longer_overlaping_matches(self): same_span2 = LicenseMatch(rule=r2, qspan=Span(1, 8), ispan=Span(1, 8)) matches, discarded = filter_contained_matches([overlap, same_span1, same_span2]) - assert [overlap, same_span2] == matches + assert matches == [overlap, same_span2] assert discarded def test_filter_overlapping_matches_prefers_longer_overlaping_matches(self): @@ -393,7 +393,7 @@ def test_filter_overlapping_matches_prefers_longer_overlaping_matches(self): same_span2 = LicenseMatch(rule=r2, qspan=Span(1, 8), ispan=Span(1, 8)) matches, discarded = filter_overlapping_matches([overlap, same_span1, same_span2]) - assert [same_span2] == matches + assert matches == [same_span2] assert discarded def test_merge_contiguous_touching_matches_in_sequence(self): @@ -403,7 +403,7 @@ def test_merge_contiguous_touching_matches_in_sequence(self): result = merge_matches([m1, m2]) match = result[0] - assert LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6)) == match + assert match == LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6)) def test_merge_contiguous_contained_matches(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -412,7 +412,7 @@ def test_merge_contiguous_contained_matches(self): m5 = LicenseMatch(rule=r1, qspan=Span(7, 8), 
ispan=Span(7, 8)) result = merge_matches([m1, m2, m5]) - assert [LicenseMatch(rule=r1, qspan=Span(0, 8), ispan=Span(0, 8))] == result + assert result == [LicenseMatch(rule=r1, qspan=Span(0, 8), ispan=Span(0, 8))] def test_merge_should_not_merge_repeated_matches_out_of_sequence(self): rule = Rule(text_file='gpl-2.0_49.RULE', license_expression=u'gpl-2.0') @@ -421,7 +421,7 @@ def test_merge_should_not_merge_repeated_matches_out_of_sequence(self): m2 = LicenseMatch(rule=rule, matcher='chunk2', qspan=Span(8, 15), ispan=Span(0, 7)) m3 = LicenseMatch(rule=rule, matcher='chunk3', qspan=Span(16, 23), ispan=Span(0, 7)) result = merge_matches([m1, m2, m3]) - assert [m1, m2, m3] == result + assert result == [m1, m2, m3] def test_merge_merges_contained_and_overlapping_match(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -432,7 +432,7 @@ def test_merge_merges_contained_and_overlapping_match(self): assert contained in m1 result = merge_matches([m1, contained, overlapping]) expected = [LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6))] - assert expected == result + assert result == expected def test_merge_does_not_merge_multiple_contained_matches_across_rules(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -448,7 +448,7 @@ def test_merge_does_not_merge_multiple_contained_matches_across_rules(self): m5 = LicenseMatch(rule=r5, qspan=Span(1, 6), ispan=Span(1, 6)) result = merge_matches([m1, contained1, contained2, m5]) - assert sorted([m1, contained1, contained2, m5]) == sorted(result) + assert sorted(result) == sorted([m1, contained1, contained2, m5]) def test_filter_contained_matches_does_filter_across_rules(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -464,7 +464,7 @@ def test_filter_contained_matches_does_filter_across_rules(self): m5 = LicenseMatch(rule=r5, qspan=Span(1, 6), ispan=Span(1, 6)) result, _discarded = filter_contained_matches([m1, contained1, contained2, m5]) - assert [m1, m5] == result + assert result == [m1, m5] def test_filter_overlapping_matches_does_not_filter_multiple_contained_matches_across_rules(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -480,7 +480,7 @@ def test_filter_overlapping_matches_does_not_filter_multiple_contained_matches_a m5 = LicenseMatch(rule=r5, qspan=Span(1, 6), ispan=Span(1, 6)) result, _discarded = filter_overlapping_matches([m1, contained1, contained2, m5]) - assert [m1] == result + assert result == [m1] def test_filter_contained_matches_filters_multiple_contained_matches(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -496,8 +496,8 @@ def test_filter_contained_matches_filters_multiple_contained_matches(self): m5 = LicenseMatch(rule=r5, qspan=Span(1, 6), ispan=Span(1, 6)) matches, discarded = filter_contained_matches([m1, contained1, contained2, m5]) - assert [m1, m5] == matches - assert sorted([contained1, contained2, ]) == sorted(discarded) + assert matches == [m1, m5] + assert sorted(discarded) == sorted([contained1, contained2, ]) def test_filter_overlapping_matches_filters_multiple_contained_matches(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -513,8 +513,8 @@ def test_filter_overlapping_matches_filters_multiple_contained_matches(self): m5 = LicenseMatch(rule=r5, qspan=Span(1, 6), ispan=Span(1, 6)) matches, discarded = filter_overlapping_matches([m1, contained1, contained2, m5]) - assert [m1] == matches - assert sorted([m5, contained1, contained2, ]) == sorted(discarded) + 
assert matches == [m1] + assert sorted(discarded) == sorted([m5, contained1, contained2, ]) def test_merge_does_not_merge_matches_with_same_spans_if_licenses_are_identical_but_rule_differ(self): r1 = Rule(text_file='r1', license_expression='apache-2.0') @@ -525,7 +525,7 @@ def test_merge_does_not_merge_matches_with_same_spans_if_licenses_are_identical_ m2 = LicenseMatch(rule=r2, qspan=Span(0, 2), ispan=Span(0, 2)) matches = merge_matches([m1, m2, m5]) - assert sorted([LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6)), m2]) == sorted(matches) + assert sorted(matches) == sorted([LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6)), m2]) def test_filter_contained_matches_filters_matches_with_same_spans_if_licenses_are_identical_but_rule_differ(self): r1 = Rule(text_file='r1', license_expression='apache-2.0') @@ -537,7 +537,7 @@ def test_filter_contained_matches_filters_matches_with_same_spans_if_licenses_ar matches, discarded = filter_contained_matches([m1, m2, m5]) - assert [m1, m5] == matches + assert matches == [m1, m5] assert discarded def test_filter_overlapping_matches_filters_matches_with_same_spans_if_licenses_are_identical_but_rule_differ(self): @@ -550,7 +550,7 @@ def test_filter_overlapping_matches_filters_matches_with_same_spans_if_licenses_ matches, discarded = filter_overlapping_matches([m1, m2, m5]) - assert [m5] == matches + assert matches == [m5] assert discarded def test_merge_then_filter_matches_with_same_spans_if_licenses_are_identical_but_rule_differ(self): @@ -564,7 +564,7 @@ def test_merge_then_filter_matches_with_same_spans_if_licenses_are_identical_but matches = merge_matches([m1, m2, m5]) matches, discarded = filter_contained_matches(matches) - assert [LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6))] == matches + assert matches == [LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6))] assert discarded def test_merge_overlapping_matches(self): @@ -573,7 +573,7 @@ def test_merge_overlapping_matches(self): m2 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6)) matches = merge_matches([m1, m2]) - assert [LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6))] == matches + assert matches == [LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6))] def test_merge_does_not_merges_matches_with_same_spans_if_licenses_are_the_same_but_have_different_licenses_ordering(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -584,7 +584,7 @@ def test_merge_does_not_merges_matches_with_same_spans_if_licenses_are_the_same_ m2 = LicenseMatch(rule=r2, qspan=Span(0, 2), ispan=Span(0, 2)) result = merge_matches([m1, m2, m5]) - assert sorted([LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6)), m2]) == sorted(result) + assert sorted(result) == sorted([LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6)), m2]) def test_merge_does_not_merges_matches_with_same_spans_if_rules_are_different(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -595,7 +595,7 @@ def test_merge_does_not_merges_matches_with_same_spans_if_rules_are_different(se m2 = LicenseMatch(rule=r2, qspan=Span(0, 2), ispan=Span(0, 2)) result = merge_matches([m1, m2, m5]) - assert sorted([LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6)), m2]) == sorted(result) + assert sorted(result) == sorted([LicenseMatch(rule=r1, qspan=Span(0, 6), ispan=Span(0, 6)), m2]) def test_merge_merges_duplicate_matches(self): r1 = Rule(text_file='r1', license_expression='apache-2.0') @@ -603,7 +603,7 @@ def 
test_merge_merges_duplicate_matches(self): m2 = LicenseMatch(rule=r1, qspan=Span(0, 8), ispan=Span(0, 8)) matches = merge_matches([m1, m2]) - assert ([m1] == matches) or ([m2] == matches) + assert (matches == [m1]) or (matches == [m2]) def test_merge_does_not_merge_overlapping_matches_in_sequence_with_assymetric_overlap(self): r1 = Rule(text_file='r1', license_expression=u'lgpl-2.0-plus') @@ -638,7 +638,7 @@ def test_merge_does_not_merge_overlapping_matches_in_sequence_with_assymetric_ov Span(131) | Span(141)) matches = merge_matches([m1, m2]) - assert [m1, m2] == matches + assert matches == [m1, m2] class TestLicenseMatchFilter(FileBasedTesting): @@ -653,8 +653,8 @@ def test_filter_contained_matches_matches_filters_multiple_nested_contained_matc in_contained = LicenseMatch(rule=r1, qspan=Span(2, 3), ispan=Span(2, 3)) result, discarded = filter_contained_matches([m1, contained, in_contained, large_overlap]) - assert [m1, large_overlap] == result - assert [contained, in_contained] == discarded + assert result == [m1, large_overlap] + assert discarded == [contained, in_contained] def test_filter_overlapping_matches_matches_filters_multiple_nested_contained_matches_and_large_overlapping(self): r1 = Rule(text_file='r1', license_expression='apache-2.0 OR gpl') @@ -663,7 +663,7 @@ def test_filter_overlapping_matches_matches_filters_multiple_nested_contained_ma contained = LicenseMatch(rule=r1, qspan=Span(1, 4), ispan=Span(1, 4)) in_contained = LicenseMatch(rule=r1, qspan=Span(2, 3), ispan=Span(2, 3)) result, discarded = filter_overlapping_matches([m1, contained, in_contained, large_overlap]) - assert [m1] == result + assert result == [m1] assert discarded def test_filter_matches_filters_non_contiguous_or_overlapping__but_contained_matches(self): @@ -675,7 +675,7 @@ def test_filter_matches_filters_non_contiguous_or_overlapping__but_contained_mat m5 = LicenseMatch(rule=r1, qspan=Span(1, 6), ispan=Span(1, 6)) result, discarded = filter_contained_matches([m1, m2, m3, m4, m5]) - assert [m4] == result + assert result == [m4] assert discarded def test_filter_matches_filters_non_contiguous_or_overlapping_contained_matches_with_touching_boundaries(self): @@ -698,7 +698,7 @@ def test_filter_matches_filters_non_contiguous_or_overlapping_contained_matches_ m4 = LicenseMatch(rule=r4, qspan=Span(0, 7), ispan=Span(0, 7)) result, discarded = filter_contained_matches([m1, m2, m3, m4, m5, m6]) - assert [m4] == result + assert result == [m4] assert discarded def test_filter_contained_matches_matches_does_filter_matches_with_contained_spans_if_licenses_are_different(self): @@ -712,7 +712,7 @@ def test_filter_contained_matches_matches_does_filter_matches_with_contained_spa m3 = LicenseMatch(rule=r3, qspan=Span(0, 2), ispan=Span(0, 2)) matches, discarded = filter_contained_matches([m1, m2, m3]) - assert [m1, m2] == matches + assert matches == [m1, m2] assert discarded def test_filter_overlapping_matches_matches_does_filter_matches_with_contained_spans_if_licenses_are_different(self): @@ -726,7 +726,7 @@ def test_filter_overlapping_matches_matches_does_filter_matches_with_contained_s m3 = LicenseMatch(rule=r3, qspan=Span(0, 2), ispan=Span(0, 2)) matches, discarded = filter_overlapping_matches([m1, m2, m3]) - assert [m2] == matches + assert matches == [m2] assert discarded def test_filter_overlapping_matches_matches_filters_matches_with_medium_overlap_only_if_license_are_the_same(self): @@ -738,7 +738,7 @@ def test_filter_overlapping_matches_matches_filters_matches_with_medium_overlap_ m3 = LicenseMatch(rule=r2, 
qspan=Span(7, 15), ispan=Span(7, 15)) result, discarded = filter_overlapping_matches([m1, m2, m3]) - assert sorted([m1, m3]) == sorted(result) + assert sorted(result) == sorted([m1, m3]) assert discarded def test_filter_matches_handles_interlaced_matches_with_overlap_and_same_license(self): @@ -752,7 +752,7 @@ def test_filter_matches_handles_interlaced_matches_with_overlap_and_same_license LicenseMatch(matcher='2-aho', rule=rules['rule2.RULE'], qspan=Span(24, 85), ispan=Span(0, 61)), ] - assert expected == matches + assert matches == expected def test_filter_contained_matches_matches_filters_matches_does_not_discard_non_overlaping(self): r1 = Rule(text_file='r1', license_expression='apache-1.1') @@ -770,8 +770,8 @@ def test_filter_contained_matches_matches_filters_matches_does_not_discard_non_o m3 = LicenseMatch(rule=r3, qspan=Span(6, 120), ispan=Span(6, 120)) result, discarded = filter_contained_matches([m2, m1, m3]) - assert [m2, m3] == result - assert [m1] == discarded + assert result == [m2, m3] + assert discarded == [m1] def test_filter_overlapping_matches_matches_filters_matches_does_not_discard_non_overlaping(self): r1 = Rule(text_file='r1', license_expression='apache-1.1') @@ -789,12 +789,12 @@ def test_filter_overlapping_matches_matches_filters_matches_does_not_discard_non m3 = LicenseMatch(rule=r3, qspan=Span(6, 120), ispan=Span(6, 120)) result, discarded = filter_overlapping_matches([m2, m1, m3]) - assert [m3] == result - assert [m1, m2] == discarded + assert result == [m3] + assert discarded == [m1, m2] result, discarded = restore_non_overlapping(result, discarded) - assert [m1] == result - assert [m2] == discarded + assert result == [m1] + assert discarded == [m2] class TestLicenseMatchScore(FileBasedTesting): @@ -879,7 +879,7 @@ def test_get_full_matched_text_base(self): EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. chabada DAMAGE 12 ABC dasdasda . ''' result = idx.match(query_string=querys) - assert 1 == len(result) + assert len(result) == 1 match = result[0] # Note that there is a trailing space in that string @@ -889,7 +889,7 @@ def test_get_full_matched_text_base(self): EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ matched_text = u''.join( get_full_matched_text(match, query_string=querys, idx=idx, _usecache=False)) - assert expected == matched_text + assert matched_text == expected expected_nh = u"""Copyright 2003 (C) James. All Rights Reserved. THIS IS FROM THE CODEHAUS AND CONTRIBUTORS @@ -898,7 +898,7 @@ def test_get_full_matched_text_base(self): matched_text_nh = u''.join( get_full_matched_text( match, query_string=querys, idx=idx, _usecache=False, highlight=False)) - assert expected_nh == matched_text_nh + assert matched_text_nh == expected_nh expected_origin_text = u"""Copyright 2003 (C) James. All Rights Reserved. THIS IS FROM THE CODEHAUS AND CONTRIBUTORS @@ -910,7 +910,7 @@ def test_get_full_matched_text_base(self): idx=idx, highlight_not_matched=u'%s', )) - assert expected_origin_text == origin_matched_text + assert origin_matched_text == expected_origin_text def test_get_full_matched_text(self): rule_text = u''' @@ -930,7 +930,7 @@ def test_get_full_matched_text(self): EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. chabada DAMAGE 12 ABC ''' result = idx.match(query_string=querys) - assert 1 == len(result) + assert len(result) == 1 match = result[0] # Note that there is a trailing space in that string @@ -940,11 +940,11 @@ def test_get_full_matched_text(self): EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" matched_text = u''.join(get_full_matched_text(match, query_string=querys, idx=idx, _usecache=False)) - assert expected == matched_text + assert matched_text == expected # the text is finally rstripped matched_text = match.matched_text(_usecache=False) - assert expected.rstrip() == matched_text + assert matched_text == expected.rstrip() # test again using some HTML with tags # Note that there is a trailing space in that string @@ -954,7 +954,7 @@ def test_get_full_matched_text(self): EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ matched_text = u''.join(get_full_matched_text( match, query_string=querys, idx=idx, highlight_not_matched=u'
<br>%s</br>
', _usecache=False)) - assert expected == matched_text + assert matched_text == expected # test again using whole_lines expected = u""" foobar 45 Copyright 2003 (C) James. All Rights Reserved. @@ -963,7 +963,7 @@ def test_get_full_matched_text(self): EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. chabada DAMAGE 12 ABC\n""" matched_text = u''.join(get_full_matched_text( match, query_string=querys, idx=idx, highlight_not_matched=u'%s', whole_lines=True)) - assert expected == matched_text + assert matched_text == expected def test_get_full_matched_text_does_not_munge_underscore(self): rule_text = 'MODULE_LICENSE_GPL' @@ -973,12 +973,12 @@ def test_get_full_matched_text_does_not_munge_underscore(self): querys = 'MODULE_LICENSE_GPL' result = idx.match(query_string=querys) - assert 1 == len(result) + assert len(result) == 1 match = result[0] expected = 'MODULE_LICENSE_GPL' matched_text = u''.join(get_full_matched_text(match, query_string=querys, idx=idx, _usecache=False)) - assert expected == matched_text + assert matched_text == expected def test_get_full_matched_text_does_not_munge_plus(self): rule_text = 'MODULE_LICENSE_GPL+ +' @@ -988,12 +988,12 @@ def test_get_full_matched_text_does_not_munge_plus(self): querys = 'MODULE_LICENSE_GPL+ +' result = idx.match(query_string=querys) - assert 1 == len(result) + assert len(result) == 1 match = result[0] expected = 'MODULE_LICENSE_GPL+ +\n' matched_text = u''.join(get_full_matched_text(match, query_string=querys, idx=idx, _usecache=False)) - assert expected == matched_text + assert matched_text == expected def test_tokenize_matched_text_does_cache_last_call_from_query_string_and_location(self): dictionary = {'module': 0, 'license': 1, 'gpl+': 2} @@ -1058,7 +1058,7 @@ def test_tokenize_matched_text_does_return_correct_tokens(self): Token(value=' \n', line_num=4, pos=-1, is_text=False, is_matched=False, is_known=False) ] - assert expected == result + assert result == expected def test_tokenize_matched_text_does_not_crash_on_turkish_unicode(self): querys = u'İrəli' @@ -1069,7 +1069,7 @@ def test_tokenize_matched_text_does_not_crash_on_turkish_unicode(self): Token(value='rəli', line_num=1, pos=-1, is_text=True, is_matched=False, is_known=False), Token(value='\n', line_num=1, pos=-1, is_text=False, is_matched=False, is_known=False), ] - assert expected == result + assert result == expected def test_tokenize_matched_text_behaves_like_query_tokenizer_on_turkish_unicode(self): from licensedcode.tokenize import query_tokenizer @@ -1129,7 +1129,7 @@ def test_reportable_tokens_filter_tokens_does_not_strip_last_token_value(self): Token(value=u'. 
', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False) ] - assert expected == result + assert result == expected # test again with whole lines match_qspan = Span(0, 1) @@ -1158,7 +1158,7 @@ def test_reportable_tokens_filter_tokens_does_not_strip_last_token_value(self): Token(value=u'THIS', line_num=2, pos=2, is_text=True, is_matched=False, is_known=True), Token(value=u'\n', line_num=2, pos=-1, is_text=False, is_matched=False, is_known=False)] - assert expected == result + assert result == expected def test_matched_text_is_collected_correctly_end2end(self): rules_data_dir = self.get_test_loc('matched_text/index/rules') @@ -1176,7 +1176,7 @@ def test_matched_text_is_collected_correctly_end2end(self): 'GPLv2 (' ] - assert expected == results + assert results == expected def check_matched_texts(self, test_loc, expected_texts, whole_lines=True): idx = cache.get_index() @@ -1186,7 +1186,7 @@ def check_matched_texts(self, test_loc, expected_texts, whole_lines=True): m.matched_text(whole_lines=whole_lines, highlight=False, _usecache=False) for m in matches ] - assert expected_texts == matched_texts + assert matched_texts == expected_texts def test_matched_text_is_collected_correctly_end2end_for_spdx_match_whole_lines(self): self.check_matched_texts( @@ -1206,31 +1206,31 @@ def test_matched_text_is_not_truncated_with_unicode_diacritic_input_from_query(s idx = cache.get_index() querys_with_diacritic_unicode = 'İ license MIT' result = idx.match(query_string=querys_with_diacritic_unicode) - assert 1 == len(result) + assert len(result) == 1 match = result[0] expected = 'license MIT' matched_text = match.matched_text(_usecache=False,) - assert expected == matched_text + assert matched_text == expected def test_matched_text_is_not_truncated_with_unicode_diacritic_input_from_file(self): idx = cache.get_index() file_with_diacritic_unicode_location = self.get_test_loc('matched_text/unicode_text/main3.js') result = idx.match(location=file_with_diacritic_unicode_location) - assert 1 == len(result) + assert len(result) == 1 match = result[0] expected = 'license MIT' matched_text = match.matched_text(_usecache=False) - assert expected == matched_text + assert matched_text == expected def test_matched_text_is_not_truncated_with_unicode_diacritic_input_from_query_whole_lines(self): idx = cache.get_index() querys_with_diacritic_unicode = 'İ license MIT' result = idx.match(query_string=querys_with_diacritic_unicode) - assert 1 == len(result) + assert len(result) == 1 match = result[0] expected = '[İ] license MIT' matched_text = match.matched_text(_usecache=False, whole_lines=True) - assert expected == matched_text + assert matched_text == expected def test_matched_text_is_not_truncated_with_unicode_diacritic_input_with_diacritic_in_rules(self): rule_dir = self.get_test_loc('matched_text/turkish_unicode/rules') @@ -1250,7 +1250,7 @@ def test_matched_text_is_not_truncated_with_unicode_diacritic_input_with_diacrit 'lİcense mit' ] - assert expected == matched_texts + assert matched_texts == expected def test_matched_text_is_not_truncated_with_unicode_diacritic_input_and_full_index(self): expected = [ @@ -1278,7 +1278,7 @@ def test_matched_text_does_not_ignores_whole_lines_in_binary_with_small_index(se expected = ['{{ .Self }} license: GPL-3 (full text at https://github.com/tianon/gosu)'] - assert expected == matched_texts + assert matched_texts == expected def test_matched_text_does_not_ignores_whole_lines_in_binary_against_full_index(self): expected = ['{{ .Self }} license: GPL-3 (full text at 
https://github.com/tianon/gosu)'] diff --git a/tests/licensedcode/test_match_aho.py b/tests/licensedcode/test_match_aho.py index 3856cae0b5a..d823ff4d26d 100644 --- a/tests/licensedcode/test_match_aho.py +++ b/tests/licensedcode/test_match_aho.py @@ -31,6 +31,6 @@ def test_match_freertos(self): qry = query.build_query(location=query_loc, idx=idx) matches = match_aho.exact_match(idx, qry.whole_query_run(), idx.rules_automaton) - assert 1 == len(matches) + assert len(matches) == 1 match = matches[0] - assert match_aho.MATCH_AHO_EXACT == match.matcher + assert match.matcher == match_aho.MATCH_AHO_EXACT diff --git a/tests/licensedcode/test_match_hash.py b/tests/licensedcode/test_match_hash.py index fae4dc27a8c..c971d5564d7 100644 --- a/tests/licensedcode/test_match_hash.py +++ b/tests/licensedcode/test_match_hash.py @@ -29,13 +29,13 @@ def test_match_hash_can_match_exactly(self): query_doc = self.get_test_loc('hash/rules/lgpl-2.0-plus_23.RULE') matches = idx.match(query_doc) - assert 1 == len(matches) + assert len(matches) == 1 match = matches[0] - assert 100 == match.coverage() - assert match_hash.MATCH_HASH == match.matcher - assert rules[0] == match.rule - assert Span(0, 119) == match.qspan - assert Span(0, 119) == match.ispan + assert match.coverage() == 100 + assert match.matcher == match_hash.MATCH_HASH + assert match.rule == rules[0] + assert match.qspan == Span(0, 119) + assert match.ispan == Span(0, 119) def test_match_hash_returns_correct_offset(self): rule_dir = self.get_test_loc('hash/rules') @@ -43,10 +43,10 @@ def test_match_hash_returns_correct_offset(self): idx = index.LicenseIndex(rules) query_doc = self.get_test_loc('hash/query.txt') matches = idx.match(query_doc) - assert 1 == len(matches) + assert len(matches) == 1 match = matches[0] - assert match_hash.MATCH_HASH == match.matcher - assert 100 == match.coverage() - assert rules[0] == match.rule - assert Span(0, 119) == match.qspan - assert Span(0, 119) == match.ispan + assert match.matcher == match_hash.MATCH_HASH + assert match.coverage() == 100 + assert match.rule == rules[0] + assert match.qspan == Span(0, 119) + assert match.ispan == Span(0, 119) diff --git a/tests/licensedcode/test_match_seq.py b/tests/licensedcode/test_match_seq.py index ed8459fde3c..e034cfe930c 100644 --- a/tests/licensedcode/test_match_seq.py +++ b/tests/licensedcode/test_match_seq.py @@ -50,9 +50,9 @@ def test_match_template_with_few_tokens_around_gaps_is_wholly_seq_matched(self): EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ''' result = idx.match(query_string=querys) - assert 1 == len(result) + assert len(result) == 1 match = result[0] - assert match_seq.MATCH_SEQ == match.matcher + assert match.matcher == match_seq.MATCH_SEQ exp_qtext = u""" Copyright [2003] ([C]) [James]. [All] [Rights] [Reserved]. 
@@ -68,9 +68,9 @@ def test_match_template_with_few_tokens_around_gaps_is_wholly_seq_matched(self): EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE """.lower().split() qtext, itext = get_texts(match) - assert exp_qtext == qtext.split() - assert exp_qtext == qtext.split() - assert exp_itext == itext.split() + assert qtext.split() == exp_qtext + assert qtext.split() == exp_qtext + assert itext.split() == exp_itext assert match.coverage() >= 70 def test_match_seq_are_correct_on_apache(self): @@ -84,9 +84,9 @@ def test_match_seq_are_correct_on_apache(self): query_loc = self.get_test_loc('match_seq/query') matches = idx.match(location=query_loc) - assert 1 == len(matches) + assert len(matches) == 1 match = matches[0] - assert match_seq.MATCH_SEQ == match.matcher + assert match.matcher == match_seq.MATCH_SEQ qtext, _itext = get_texts(match) expected = u''' The OpenSymphony Group. All rights reserved. @@ -129,4 +129,4 @@ def test_match_seq_are_correct_on_apache(self): OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ''' - assert expected.split() == qtext.split() + assert qtext.split() == expected.split() diff --git a/tests/licensedcode/test_match_spdx_lid.py b/tests/licensedcode/test_match_spdx_lid.py index 7e3fbb8d324..28e418bf22b 100644 --- a/tests/licensedcode/test_match_spdx_lid.py +++ b/tests/licensedcode/test_match_spdx_lid.py @@ -54,7 +54,7 @@ def test_Query_with_spdx_basic(self): ('SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0',16, 34), ('SPDX-License-Identifier: GPL-2.0+ BSD-2-Clause', 45, 53)] - assert expected == qry.spdx_lines + assert qry.spdx_lines == expected def get_query_spdx_lines_test_method(test_loc , expected_loc, regen=False): @@ -76,7 +76,7 @@ def test_method(self): with open(expected_loc, 'rb') as ef: expected = json.load(ef, encoding='utf-8') - assert expected == results + assert results == expected return test_method @@ -154,7 +154,7 @@ def test_clean_line(self): 'SPDX License Identifier LGPL-2.1+' ] results = [clean_text(test) for test in tests] - assert expected == results + assert results == expected def test_prepare_text(self): tests = [ @@ -199,11 +199,11 @@ def test_prepare_text(self): ('SPDX Licence Identifier', 'LGPL-2.1+') ] results = [prepare_text(test) for test in tests] - assert expected == results + assert results == expected def test_prepare_text_with_rem(self): - assert (None, '') == prepare_text('') - assert ('SPDX-License-Identifier:', 'BSD-2-Clause-Patent') == prepare_text('@REM # SPDX-License-Identifier: BSD-2-Clause-Patent') + assert prepare_text('') == (None, '') + assert prepare_text('@REM # SPDX-License-Identifier: BSD-2-Clause-Patent') == ('SPDX-License-Identifier:', 'BSD-2-Clause-Patent') def test_split_spdx_lid(self): test = [ @@ -225,7 +225,7 @@ def test_split_spdx_lid(self): ('SPDx-Licence-Identifier : ', 'BSD-3-Clause'), (None, 'SPD-Licence-Identifier : BSD-3-Clause'), ] - assert expected == results + assert results == expected def test__split_spdx_lid(self): test = [ @@ -247,7 +247,7 @@ def test__split_spdx_lid(self): ['', 'SPDX-License-Identifier : ', 'BSD-3-Clause'], ['SPDX-License-Identifer : BSD-3-Clause'], ] - assert expected == results + assert results == expected def test_get_expression_quoted(self): licensing = Licensing() @@ -255,7 +255,7 @@ def test_get_expression_quoted(self): unknown_symbol = get_unknown_spdx_symbol() line_text = '''LIST "SPDX-License-Identifier: GPL-2.0"''' expression = get_expression(line_text, 
licensing, spdx_symbols, unknown_symbol) - assert 'gpl-2.0' == expression.render() + assert expression.render() == 'gpl-2.0' def test_get_expression_multiple_or(self): licensing = Licensing() @@ -263,7 +263,7 @@ def test_get_expression_multiple_or(self): unknown_symbol = get_unknown_spdx_symbol() line_text = '* SPDX-License-Identifier: (BSD-3-Clause OR EPL-1.0 OR Apache-2.0 OR MIT)' expression = get_expression(line_text, licensing, spdx_symbols, unknown_symbol) - assert 'bsd-new OR epl-1.0 OR apache-2.0 OR mit' == expression.render() + assert expression.render() == 'bsd-new OR epl-1.0 OR apache-2.0 OR mit' def test_get_expression_simple(self): licensing = Licensing() @@ -271,7 +271,7 @@ def test_get_expression_simple(self): unknown_symbol = get_unknown_spdx_symbol() line_text = '* SPDX-License-Identifier: BSD-3-Clause' expression = get_expression(line_text, licensing, spdx_symbols, unknown_symbol) - assert 'bsd-new' == expression.render() + assert expression.render() == 'bsd-new' def test_get_expression_with_exception(self): licensing = Licensing() @@ -279,7 +279,7 @@ def test_get_expression_with_exception(self): unknown_symbol = get_unknown_spdx_symbol() line_text = '/* SPDX-License-Identifier: GPL-1.0+ WITH Linux-syscall-note */' expression = get_expression(line_text, licensing, spdx_symbols, unknown_symbol) - assert 'gpl-1.0-plus WITH linux-syscall-exception-gpl' == expression.render() + assert expression.render() == 'gpl-1.0-plus WITH linux-syscall-exception-gpl' def test_get_expression_with_plus(self): licensing = Licensing() @@ -287,7 +287,7 @@ def test_get_expression_with_plus(self): unknown_symbol = get_unknown_spdx_symbol() line_text = '* SPDX-License-Identifier: GPL-2.0+' expression = get_expression(line_text, licensing, spdx_symbols, unknown_symbol) - assert 'gpl-2.0-plus' == expression.render() + assert expression.render() == 'gpl-2.0-plus' def test_get_expression_with_extra_parens(self): licensing = Licensing() @@ -295,7 +295,7 @@ def test_get_expression_with_extra_parens(self): unknown_symbol = get_unknown_spdx_symbol() line_text = '* SPDX-License-Identifier: (GPL-2.0+ OR MIT)' expression = get_expression(line_text, licensing, spdx_symbols, unknown_symbol) - assert 'gpl-2.0-plus OR mit' == expression.render() + assert expression.render() == 'gpl-2.0-plus OR mit' def test_get_expression_extra_parens_2(self): licensing = Licensing() @@ -303,7 +303,7 @@ def test_get_expression_extra_parens_2(self): unknown_symbol = get_unknown_spdx_symbol() line_text = '// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)' expression = get_expression(line_text, licensing, spdx_symbols, unknown_symbol) - assert 'gpl-2.0 OR bsd-simplified' == expression.render() + assert expression.render() == 'gpl-2.0 OR bsd-simplified' def test_get_expression_with_parens_and_with(self): licensing = Licensing() @@ -311,7 +311,7 @@ def test_get_expression_with_parens_and_with(self): unknown_symbol = get_unknown_spdx_symbol() line_text = '/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) AND MIT) */' expression = get_expression(line_text, licensing, spdx_symbols, unknown_symbol) - assert 'gpl-2.0 WITH linux-syscall-exception-gpl AND mit' == expression.render() + assert expression.render() == 'gpl-2.0 WITH linux-syscall-exception-gpl AND mit' def test_get_expression_simple_with(self): licensing = Licensing() @@ -319,7 +319,7 @@ def test_get_expression_simple_with(self): unknown_symbol = get_unknown_spdx_symbol() line_text = '/* SPDX-License-Identifier: LGPL-2.0+ WITH Linux-syscall-note */' expression = 
get_expression(line_text, licensing, spdx_symbols, unknown_symbol) - assert 'lgpl-2.0-plus WITH linux-syscall-exception-gpl' == expression.render() + assert expression.render() == 'lgpl-2.0-plus WITH linux-syscall-exception-gpl' def test_get_expression_license_ref(self): licensing = Licensing() @@ -327,7 +327,7 @@ def test_get_expression_license_ref(self): unknown_symbol = get_unknown_spdx_symbol() line_text = '/* SPDX-License-Identifier: LicenseRef-ABC */' expression = get_expression(line_text, licensing, spdx_symbols, unknown_symbol) - assert 'unknown-spdx' == expression.render() + assert expression.render() == 'unknown-spdx' def test_get_expression_complex(self): licensing = Licensing() @@ -340,10 +340,10 @@ def test_get_expression_complex(self): expression = get_expression(line_text, licensing, spdx_symbols, unknown_symbol) expected = 'epl-2.0 OR apache-2.0 OR gpl-2.0 WITH classpath-exception-2.0 OR gpl-2.0' - assert expected == expression.render() + assert expression.render() == expected expected = ['epl-2.0', u'apache-2.0', u'gpl-2.0', u'classpath-exception-2.0'] - assert expected == licensing.license_keys(expression, unique=True) + assert licensing.license_keys(expression, unique=True) == expected assert all(s.wrapped for s in licensing.license_symbols(expression, decompose=True)) @@ -357,10 +357,10 @@ def test_get_expression_without_lid(self): expression = get_expression(line_text, licensing, spdx_symbols, unknown_symbol) expected = 'epl-2.0 OR apache-2.0 OR gpl-2.0 WITH classpath-exception-2.0 OR gpl-2.0' - assert expected == expression.render() + assert expression.render() == expected expected = ['epl-2.0', u'apache-2.0', u'gpl-2.0', u'classpath-exception-2.0', u'gpl-2.0'] - assert expected == licensing.license_keys(expression, unique=False) + assert licensing.license_keys(expression, unique=False) == expected assert all(s.wrapped for s in licensing.license_symbols(expression, decompose=True)) @@ -376,10 +376,10 @@ def test_get_expression_complex_with_unknown_symbols_and_refs(self): expression = get_expression(line_text, licensing, spdx_symbols, unknown_symbol) expected = 'epl-2.0 OR apache-2.0 OR gpl-2.0 WITH classpath-exception-2.0 OR unknown-spdx WITH unknown-spdx' - assert expected == expression.render() + assert expression.render() == expected expected = ['epl-2.0', 'apache-2.0', 'gpl-2.0', 'classpath-exception-2.0', 'unknown-spdx', 'unknown-spdx'] - assert expected == licensing.license_keys(expression, unique=False) + assert licensing.license_keys(expression, unique=False) == expected assert all(s.wrapped for s in licensing.license_symbols(expression, decompose=True)) @@ -400,7 +400,7 @@ def test_get_expression_without_and_should_not_return_unknown(self): unknown_symbol = get_unknown_spdx_symbol() line_text = '* SPDX-License-Identifier: GPL-2.0+ BSD-2-Clause' expression = get_expression(line_text, licensing, spdx_symbols, unknown_symbol) - assert unknown_symbol != expression + assert expression != unknown_symbol def test__reparse_invalid_expression_without_or_should_return_a_proper_expression(self): # this is a uboot-style legacy expression without OR @@ -410,7 +410,7 @@ def test__reparse_invalid_expression_without_or_should_return_a_proper_expressio line_text = 'GPL-2.0+ BSD-2-Clause' expression = _reparse_invalid_expression(line_text, licensing, spdx_symbols, unknown_symbol) expected = 'gpl-2.0-plus OR bsd-simplified' - assert expected == expression.render() + assert expression.render() == expected def 
test__reparse_invalid_expression_with_improper_keyword_should_return_a_proper_expression(self): licensing = Licensing() @@ -419,7 +419,7 @@ def test__reparse_invalid_expression_with_improper_keyword_should_return_a_prope line_text = 'or GPL-2.0+ BSD-2-Clause ' expression = _reparse_invalid_expression(line_text, licensing, spdx_symbols, unknown_symbol) expected = '(gpl-2.0-plus AND bsd-simplified) AND unknown-spdx' - assert expected == expression.render() + assert expression.render() == expected def test__reparse_invalid_expression_with_non_balanced_parens_should_return_a_proper_expression(self): licensing = Licensing() @@ -428,7 +428,7 @@ def test__reparse_invalid_expression_with_non_balanced_parens_should_return_a_pr line_text = '(GPL-2.0+ and (BSD-2-Clause ' expression = _reparse_invalid_expression(line_text, licensing, spdx_symbols, unknown_symbol) expected = '(gpl-2.0-plus AND bsd-simplified) AND unknown-spdx' - assert expected == expression.render() + assert expression.render() == expected def test__parse_expression_with_empty_expression_should_raise_ExpressionError(self): licensing = Licensing() @@ -447,7 +447,7 @@ def test_get_expression_with_empty_expression_should_return_unknown(self): unknown_symbol = get_unknown_spdx_symbol() line_text = '* SPDX-License-Identifier:' expression = get_expression(line_text, licensing, spdx_symbols, unknown_symbol) - assert None == expression + assert expression is None def test__parse_expression_with_empty_expression2_should_return_None(self): licensing = Licensing() @@ -463,7 +463,7 @@ def test_get_expression_with_empty_expression2_should_return_unknown(self): unknown_symbol = get_unknown_spdx_symbol() line_text = '' expression = get_expression(line_text, licensing, spdx_symbols, unknown_symbol) - assert None == expression + assert expression is None def test_all_spdx_tokens_exists_in_dictionary(self): idx = cache.get_index() @@ -496,7 +496,7 @@ def test_get_expression_works_for_legacy_deprecated_old_spdx_symbols(self): for test, expected in exp_by_old.items(): result = get_expression( test, licensing, symbols_by_spdx, unknown_symbol).render() - assert expected == result + assert result == expected def test_spdx_match_contains_spdx_prefix(self): from licensedcode import index @@ -506,13 +506,13 @@ def test_spdx_match_contains_spdx_prefix(self): idx = index.LicenseIndex(models.get_rules(lics_dir, rule_dir)) querys = 'SPDX-license-identifier: BSD-3-Clause-No-Nuclear-Warranty' matches = idx.match(query_string=querys) - assert 1 == len(matches) + assert len(matches) == 1 match = matches[0] qtext, itext = tracing.get_texts(match) expected_qtext = 'SPDX-license-identifier: BSD-3-Clause-No-Nuclear-Warranty' - assert expected_qtext == qtext + assert qtext == expected_qtext expected_itext = 'spdx license identifier bsd 3 clause no nuclear warranty' - assert expected_itext == itext + assert itext == expected_itext def test_get_expression_does_not_fail_on_empty(self): licensing = Licensing() @@ -520,7 +520,7 @@ def test_get_expression_does_not_fail_on_empty(self): unknown_symbol = get_unknown_spdx_symbol() line_text = 'SPDX-License-Identifier: ' expression = get_expression(line_text, licensing, spdx_symbols, unknown_symbol) - assert None == expression + assert expression is None def test_Index_match_does_not_fail_on_empty(self): idx = cache.get_index() diff --git a/tests/licensedcode/test_models.py b/tests/licensedcode/test_models.py index 22a171c6da3..f5ab0145257 100644 --- a/tests/licensedcode/test_models.py +++ b/tests/licensedcode/test_models.py @@ -27,7
+27,7 @@ def check_json(expected, results, regen=False): json.dump(results, ex, indent=2, separators=(',', ': ')) with open(expected) as ex: expected = json.load(ex) - assert expected == results + assert results == expected def as_sorted_mapping_seq(licenses): @@ -103,8 +103,8 @@ def test_build_rules_from_licenses(self): def test_validate_license_library(self): errors, warnings, infos = models.License.validate( cache.get_licenses_db(), verbose=True) - assert {} == errors - assert {} == warnings + assert errors == {} + assert warnings == {} assert infos def test_validate_license_library_can_return_errors(self): @@ -136,7 +136,7 @@ def test_validate_license_library_can_return_errors(self): 'No SPDX license key'], } - assert expected_errors == errors + assert errors == expected_errors expected_warnings = { 'gpl-1.0': [ @@ -150,10 +150,10 @@ def test_validate_license_library_can_return_errors(self): 'Some duplicated URLs'] } - assert expected_warnings == warnings + assert warnings == expected_warnings expected_infos = {'w3c-docs-19990405': [u'No license text']} - assert expected_infos == infos + assert infos == expected_infos def test_load_licenses_fails_if_directory_contains_orphaned_files(self): test_dir = self.get_test_loc('models/orphaned_licenses') @@ -170,8 +170,8 @@ class TestRule(FileBasedTesting): def test_create_rule_ignore_punctuation(self): test_rule = models.Rule(stored_text='A one. A {{}}two. A three.') expected = ['one', 'two', 'three'] - assert expected == list(test_rule.tokens()) - assert 3 == test_rule.length + assert list(test_rule.tokens()) == expected + assert test_rule.length == 3 def test_create_plain_rule_with_text_file(self): @@ -183,8 +183,8 @@ def create_test_file(text): test_rule = models.Rule(text_file=create_test_file('A one. A two. 
A three.')) expected = ['one', 'two', 'three'] - assert expected == list(test_rule.tokens()) - assert 3 == test_rule.length + assert list(test_rule.tokens()) == expected + assert test_rule.length == 3 def test_load_rules(self): test_dir = self.get_test_loc('models/rules') @@ -241,7 +241,7 @@ def test_spdxrule_with_invalid_expression(self): def test_template_rule_is_loaded_correctly(self): test_dir = self.get_test_loc('models/rule_template') rules = list(models.load_rules(test_dir)) - assert 1 == len(rules) + assert len(rules) == 1 def test_rule_len_is_computed_correctly(self): test_text = '''zero one two three @@ -249,12 +249,12 @@ def test_rule_len_is_computed_correctly(self): five six seven eight nine ten''' r1 = models.Rule(stored_text=test_text) list(r1.tokens()) - assert 12 == r1.length + assert r1.length == 12 def test_rule_templates_are_ignored(self): test_text = '''{{gap0}}zero one two three{{gap2}}''' r1 = models.Rule(stored_text=test_text) - assert ['gap0', 'zero', 'one', 'two', 'three', 'gap2'] == list(r1.tokens()) + assert list(r1.tokens()) == ['gap0', 'zero', 'one', 'two', 'three', 'gap2'] def test_rule_tokens_are_computed_correctly_ignoring_templates(self): test_text = '''I hereby abandon any{{SAX 2.0 (the)}}, and Release all of {{the SAX 2.0 }}source code of his''' @@ -266,7 +266,7 @@ def test_rule_tokens_are_computed_correctly_ignoring_templates(self): 'release', 'all', 'of', 'the', 'sax', '2', '0', 'source', 'code', 'of', 'his' ] - assert expected == rule_tokens + assert rule_tokens == expected def test_compute_thresholds_occurences(self): minimum_coverage = 0.0 @@ -278,7 +278,7 @@ def test_compute_thresholds_occurences(self): expected_min_matched_length = 4 expected_min_high_matched_length = 3 expected = expected_min_cov, expected_min_matched_length, expected_min_high_matched_length - assert expected == results + assert results == expected length_unique = 39 high_length_unique = 7 @@ -288,7 +288,7 @@ def test_compute_thresholds_occurences(self): expected_min_matched_length_unique = 4 expected_min_high_matched_length_unique = 3 expected = expected_min_matched_length_unique, expected_min_high_matched_length_unique - assert expected == results + assert results == expected def test_Thresholds(self): @@ -303,7 +303,7 @@ def test_Thresholds(self): expected_min_matched_length = 8 expected_min_high_matched_length = 4 expected = expected_min_cov, expected_min_matched_length, expected_min_high_matched_length - assert expected == results + assert results == expected results = models.compute_thresholds_unique( r1.minimum_coverage, r1.length, r1.length_unique, r1.high_length_unique) @@ -311,21 +311,21 @@ def test_Thresholds(self): expected_min_matched_length_unique = 3 expected_min_high_matched_length_unique = 2 expected = expected_min_matched_length_unique, expected_min_high_matched_length_unique - assert expected == results + assert results == expected results = models.compute_thresholds_occurences(r2.minimum_coverage, r2.length, r2.high_length) expected_min_cov = 0.0 expected_min_matched_length = 4 expected_min_high_matched_length = 3 expected = expected_min_cov, expected_min_matched_length, expected_min_high_matched_length - assert expected == results + assert results == expected results = models.compute_thresholds_unique( r2.minimum_coverage, r2.length, r2.length_unique, r2.high_length_unique) expected_min_matched_length_unique = 4 expected_min_high_matched_length_unique = 1 expected = expected_min_matched_length_unique, expected_min_high_matched_length_unique - assert expected 
== results + assert results == expected def test_compute_relevance_does_not_change_stored_relevance(self): rule = models.Rule(stored_text='1', license_expression='public-domain') @@ -333,7 +333,7 @@ def test_compute_relevance_does_not_change_stored_relevance(self): rule.has_stored_relevance = True rule.length = 1000 rule.compute_relevance() - assert 13 == rule.relevance + assert rule.relevance == 13 def test_compute_relevance_is_hundred_for_false_positive(self): rule = models.Rule(stored_text='1', license_expression='public-domain') @@ -342,7 +342,7 @@ def test_compute_relevance_is_hundred_for_false_positive(self): rule.is_false_positive = True rule.length = 1000 rule.compute_relevance() - assert 100 == rule.relevance + assert rule.relevance == 100 def test_compute_relevance_is_using_rule_length(self): rule = models.Rule(stored_text='1', license_expression='some-license') @@ -352,71 +352,71 @@ def test_compute_relevance_is_using_rule_length(self): rule.length = 1000 rule.compute_relevance() - assert 100 == rule.relevance + assert rule.relevance == 100 rule.length = 21 rule.compute_relevance() - assert 100 == rule.relevance + assert rule.relevance == 100 rule.length = 20 rule.compute_relevance() - assert 100 == rule.relevance + assert rule.relevance == 100 rule.length = 18 rule.compute_relevance() - assert 100 == rule.relevance + assert rule.relevance == 100 rule.length = 17 rule.compute_relevance() - assert 94 == rule.relevance + assert rule.relevance == 94 rule.length = 16 rule.compute_relevance() - assert 88 == rule.relevance + assert rule.relevance == 88 rule.length = 15 rule.compute_relevance() - assert 83 == rule.relevance + assert rule.relevance == 83 rule.length = 14 rule.compute_relevance() - assert 77 == rule.relevance + assert rule.relevance == 77 rule.length = 13 rule.compute_relevance() - assert 72 == rule.relevance + assert rule.relevance == 72 rule.length = 12 rule.compute_relevance() - assert 66 == rule.relevance + assert rule.relevance == 66 rule.length = 11 rule.compute_relevance() - assert 61 == rule.relevance + assert rule.relevance == 61 rule.length = 10 rule.compute_relevance() - assert 55 == rule.relevance + assert rule.relevance == 55 rule.length = 8 rule.compute_relevance() - assert 44 == rule.relevance + assert rule.relevance == 44 rule.length = 5 rule.compute_relevance() - assert 27 == rule.relevance + assert rule.relevance == 27 rule.length = 2 rule.compute_relevance() - assert 11 == rule.relevance + assert rule.relevance == 11 rule.length = 1 rule.compute_relevance() - assert 5 == rule.relevance + assert rule.relevance == 5 rule.length = 0 rule.compute_relevance() - assert 0 == rule.relevance + assert rule.relevance == 0 def test_rule_must_have_text(self): data_file = self.get_test_loc('models/rule_no_text/mit.yml') @@ -446,4 +446,4 @@ def test_load_rules_loads_file_content_at_path_and_not_path_as_string(self): def test_Rule__validate_with_false_positive_rule(self): rule_dir = self.get_test_loc('models/rule_validate') rule = list(models.load_rules(rule_dir))[0] - assert [] == list(rule.validate()) + assert list(rule.validate()) == [] diff --git a/tests/licensedcode/test_plugin_license_policy.py b/tests/licensedcode/test_plugin_license_policy.py index ecf2b8a380f..2168612cc7e 100644 --- a/tests/licensedcode/test_plugin_license_policy.py +++ b/tests/licensedcode/test_plugin_license_policy.py @@ -33,7 +33,7 @@ def test_process_codebase_info_license_duplicate_key_policy_file(self): for result in scan_result['files']: assert 'license_policy' in result.keys() - assert 
{} == result['license_policy'] + assert result['license_policy'] == {} def test_process_codebase_info_license_valid_policy_file(self): test_dir = self.extract_test_tar('plugin_license_policy/policy-codebase.tgz') @@ -136,28 +136,28 @@ def test_has_policy_duplcates_invalid_dupes(self): result = has_policy_duplicates(test_file) - assert True == result + assert result is True def test_has_policy_duplcates_valid(self): test_file = self.get_test_loc('plugin_license_policy/has_policy_duplicates_valid.yml') result = has_policy_duplicates(test_file) - assert False == result + assert result is False def test_has_policy_duplicates_empty(self): test_file = self.get_test_loc('plugin_license_policy/has_policy_duplicates_empty.yml') result = has_policy_duplicates(test_file) - assert False == result + assert result is False def test_has_policy_duplicates_invalid_no_dupes(self): test_file = self.get_test_loc('plugin_license_policy/has_policy_duplicates_invalid_no_dupes.yml') result = has_policy_duplicates(test_file) - assert False == result + assert result is False def test_load_license_policy_duplicate_keys(self): test_file = self.get_test_loc('plugin_license_policy/load_license_policy_duplicate_keys.yml') @@ -205,7 +205,7 @@ def test_load_license_policy_duplicate_keys(self): result = load_license_policy(test_file) - assert expected == result + assert result == expected def test_load_license_policy_valid(self): test_file = self.get_test_loc('plugin_license_policy/load_license_policy_valid.yml') @@ -247,7 +247,7 @@ def test_load_license_policy_valid(self): result = load_license_policy(test_file) - assert expected == result + assert result == expected def test_load_license_policy_empty(self): test_file = self.get_test_loc('plugin_license_policy/load_license_policy_empty.yml') @@ -258,11 +258,11 @@ def test_load_license_policy_empty(self): result = load_license_policy(test_file) - assert expected == result + assert result == expected def test_load_license_policy_invalid(self): test_file = self.get_test_loc('plugin_license_policy/load_license_policy_invalid.yml') result = load_license_policy(test_file) - assert {} == result + assert result == {} diff --git a/tests/licensedcode/test_query.py b/tests/licensedcode/test_query.py index aac1702fecb..f789a19455f 100644 --- a/tests/licensedcode/test_query.py +++ b/tests/licensedcode/test_query.py @@ -36,7 +36,7 @@ def check_result_equals_expected_json(result, expected, regen=False): with open(expected) as ex: expected = json.loads(ex.read()) - assert expected == result + assert result == expected class IndexTesting(FileBasedTesting): @@ -79,7 +79,7 @@ def test_Query_tokens_by_line_from_string(self): [None], ] - assert expected == result + assert result == expected # convert tid to actual token strings qtbl_as_str = lambda qtbl: [[None if tid is None else idx.tokens_by_tid[tid] for tid in tids] for tids in qtbl] @@ -95,16 +95,16 @@ def test_Query_tokens_by_line_from_string(self): [None], ] - assert expected_str == result_str + assert result_str == expected_str - assert [3, 3, 3, 3, 3, 3, 3, 3, 3, 6] == qry.line_by_pos + assert qry.line_by_pos == [3, 3, 3, 3, 3, 3, 3, 3, 3, 6] idx = index.LicenseIndex([Rule(stored_text=rule_text, license_expression='bsd')]) querys = 'and this is not a license' qry = Query(query_string=querys, idx=idx, _test_mode=True) result = list(qry.tokens_by_line()) expected = [['and', None, None, None, 'license']] - assert expected == qtbl_as_str(result) + assert qtbl_as_str(result) == expected def test_Query_known_and_unknown_positions(self): @@
-117,15 +117,14 @@ def test_Query_known_and_unknown_positions(self): qry = Query(query_string=querys, idx=idx, _test_mode=False) # we have only 4 known positions in this query, hence only 4 entries there on a single line # "Redistribution and use in" - assert [1, 1, 1, 1, 1] == qry.line_by_pos + assert qry.line_by_pos == [1, 1, 1, 1, 1] # this shows our 4 known tokens in this query with their known positions # "Redistribution and use in" - assert [1, 2, 3, 4, 0] == qry.tokens + assert qry.tokens == [1, 2, 3, 4, 0] # the first two tokens are unknown, then starting after "in" we have three trailing unknowns. - assert {3: 1, 4: 1, -1: 2} == qry.unknowns_by_pos - + assert qry.unknowns_by_pos == {3: 1, 4: 1, -1: 2} # This shows how knowns and unknowns are blended result = list(qry.tokens_with_unknowns()) expected = [ @@ -142,7 +141,7 @@ def test_Query_known_and_unknown_positions(self): # other form always' None, 0, None ] - assert expected == result + assert result == expected def test_Query_tokenize_from_string(self): @@ -163,23 +162,23 @@ def test_Query_tokenize_from_string(self): expected = ['redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted', 'and'] result = tks_as_str(qry.tokens) - assert expected == result + assert result == expected expected = [None, 'redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted', None, None, None, None, None, 'and', None, None] result = tks_as_str(qry.tokens_with_unknowns()) - assert expected == result + assert result == expected - assert 1 == len(qry.query_runs) + assert len(qry.query_runs) == 1 qr1 = qry.query_runs[0] - assert 0 == qr1.start - assert 9 == qr1.end - assert 10 == len(qr1) + assert qr1.start == 0 + assert qr1.end == 9 + assert len(qr1) == 10 expected = ['redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted', 'and'] result = tks_as_str(qr1.tokens) - assert expected == result + assert result == expected expected = [None, 'redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted', None, None, None, None, None, 'and'] result = tks_as_str(qr1.tokens_with_unknowns()) - assert expected == result + assert result == expected def test_QueryRuns_tokens_with_unknowns(self): rule_text = 'Redistribution and use in source and binary forms with or without modification are permitted' @@ -193,22 +192,22 @@ def test_QueryRuns_tokens_with_unknowns(self): Always''' qry = Query(query_string=querys, idx=idx) - assert set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) == set(qry.matchables) + assert set(qry.matchables) == set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - assert 1 == len(qry.query_runs) + assert len(qry.query_runs) == 1 qrun = qry.query_runs[0] # convert tid to actual token strings tks_as_str = lambda tks: [None if tid is None else idx.tokens_by_tid[tid] for tid in tks] expected = ['redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted', 'and'] - assert expected == tks_as_str(qrun.tokens) + assert tks_as_str(qrun.tokens) == expected expected = [None, 'redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted', None, None, None, None, None, 'and'] - assert expected == tks_as_str(qrun.tokens_with_unknowns()) + assert tks_as_str(qrun.tokens_with_unknowns()) == expected - assert 0 == qrun.start - assert 9 == qrun.end + assert qrun.start == 0 + assert qrun.end == 9 def test_QueryRun_does_not_end_with_None(self): rule_text = 'Redistribution and use in source and binary forms, with or without modification, are permitted' @@
-238,21 +237,21 @@ def test_QueryRun_does_not_end_with_None(self): 'modification', None ] - assert [x for x in expected if x] == tks_as_str(qry.tokens) - assert expected == tks_as_str(qry.tokens_with_unknowns()) + assert tks_as_str(qry.tokens) == [x for x in expected if x] + assert tks_as_str(qry.tokens_with_unknowns()) == expected - assert 2 == len(qry.query_runs) + assert len(qry.query_runs) == 2 qrun = qry.query_runs[0] expected = ['redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'forms', 'with', 'or', 'without', 'modification', 'are', 'permitted'] - assert expected == tks_as_str(qrun.tokens) - assert 0 == qrun.start - assert 13 == qrun.end + assert tks_as_str(qrun.tokens) == expected + assert qrun.start == 0 + assert qrun.end == 13 qrun = qry.query_runs[1] expected = ['modification'] - assert expected == tks_as_str(qrun.tokens) - assert 14 == qrun.start - assert 14 == qrun.end + assert tks_as_str(qrun.tokens) == expected + assert qrun.start == 14 + assert qrun.end == 14 def test_Query_from_real_index_and_location(self): idx = index.LicenseIndex(self.get_test_rules('index/bsd')) @@ -269,13 +268,13 @@ def test_Query_from_real_index_and_location(self): u'provided by the copyright holders and contributors as is') }, {'end': 36, 'start': 36, 'tokens': u'redistributions'}] - assert expected == result + assert result == expected expected_lbp = [ 4, 4, 4, 4, 4, 4, 4, 4, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 8, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 15 ] - assert expected_lbp == qry.line_by_pos + assert qry.line_by_pos == expected_lbp def test_query_and_index_tokens_are_identical_for_same_text(self): rule_dir = self.get_test_loc('query/rtos_exact/') @@ -298,72 +297,72 @@ def test_query_run_tokens_with_junk(self): idx = index.LicenseIndex([Rule(stored_text='a is the binary')], _legalese=legalese, _spdx_tokens=set()) - assert 1 == idx.len_legalese - assert {u'binary': 0, u'is': 1, u'the': 2} == idx.dictionary + assert idx.len_legalese == 1 + assert idx.dictionary == {u'binary': 0, u'is': 1, u'the': 2} # two junks q = Query(query_string='a the', idx=idx) assert q.line_by_pos qrun = q.query_runs[0] - assert [2] == qrun.tokens - assert {} == qrun.query.unknowns_by_pos + assert qrun.tokens == [2] + assert qrun.query.unknowns_by_pos == {} # one junk q = Query(query_string='a binary', idx=idx) qrun = q.query_runs[0] assert q.line_by_pos - assert [0] == qrun.tokens - assert {} == qrun.query.unknowns_by_pos + assert qrun.tokens == [0] + assert qrun.query.unknowns_by_pos == {} # one junk q = Query(query_string='binary the', idx=idx) qrun = q.query_runs[0] assert q.line_by_pos - assert [0, 2] == qrun.tokens - assert {} == qrun.query.unknowns_by_pos + assert qrun.tokens == [0, 2] + assert qrun.query.unknowns_by_pos == {} # one unknown at start q = Query(query_string='that binary', idx=idx) qrun = q.query_runs[0] assert q.line_by_pos - assert [0] == qrun.tokens - assert {-1: 1} == qrun.query.unknowns_by_pos + assert qrun.tokens == [0] + assert qrun.query.unknowns_by_pos == {-1: 1} # one unknown at end q = Query(query_string='binary that', idx=idx) qrun = q.query_runs[0] assert q.line_by_pos - assert [0] == qrun.tokens - assert {0: 1} == qrun.query.unknowns_by_pos + assert qrun.tokens == [0] + assert qrun.query.unknowns_by_pos == {0: 1} # one unknown in the middle q = Query(query_string='binary that a binary', idx=idx) qrun = q.query_runs[0] assert q.line_by_pos - assert [0, 0] == qrun.tokens - assert {0: 1} == qrun.query.unknowns_by_pos + assert qrun.tokens == [0, 0] + assert
qrun.query.unknowns_by_pos == {0: 1} # one unknown in the middle q = Query(query_string='a binary that a binary', idx=idx) qrun = q.query_runs[0] assert q.line_by_pos - assert [0, 0] == qrun.tokens - assert {0: 1} == qrun.query.unknowns_by_pos + assert qrun.tokens == [0, 0] + assert qrun.query.unknowns_by_pos == {0: 1} # two unknowns in the middle q = Query(query_string='binary that was a binary', idx=idx) qrun = q.query_runs[0] assert q.line_by_pos - assert [0, 0] == qrun.tokens - assert {0: 2} == qrun.query.unknowns_by_pos + assert qrun.tokens == [0, 0] + assert qrun.query.unknowns_by_pos == {0: 2} # unknowns at start, middle and end q = Query(query_string='hello dolly binary that was a binary end really', idx=idx) # u u u u u u qrun = q.query_runs[0] assert q.line_by_pos - assert [0, 0] == qrun.tokens - assert {0: 2, 1: 2, -1: 2} == qrun.query.unknowns_by_pos + assert qrun.tokens == [0, 0] + assert qrun.query.unknowns_by_pos == {0: 2, 1: 2, -1: 2} def test_query_tokens_are_same_for_different_text_formatting(self): @@ -379,26 +378,26 @@ def test_query_tokens_are_same_for_different_text_formatting(self): idx = index.LicenseIndex([Rule(text_file=rule_file, license_expression='mit')]) q = Query(location=rule_file, idx=idx) - assert 1 == len(q.query_runs) + assert len(q.query_runs) == 1 expected = q.query_runs[0] for tf in test_files: q = Query(tf, idx=idx) qr = q.query_runs[0] - assert expected.tokens == qr.tokens + assert qr.tokens == expected.tokens def test_query_run_unknowns(self): legalese = set(['binary']) idx = index.LicenseIndex([Rule(stored_text='a is the binary')], _legalese=legalese) - assert {u'binary': 0, u'is': 1, u'the': 2} == idx.dictionary - assert 1 == idx.len_legalese + assert idx.dictionary == {u'binary': 0, u'is': 1, u'the': 2} + assert idx.len_legalese == 1 # multiple unknowns at start, middle and end q = Query(query_string='that new binary was sure a kind of the real mega deal', idx=idx) # known pos 0 1 2 # abs pos 0 1 2 3 4 5 6 7 8 9 10 11 expected = {-1: 2, 0: 4, 1: 3} - assert expected == dict(q.unknowns_by_pos) + assert dict(q.unknowns_by_pos) == expected class TestQueryWithMultipleRuns(IndexTesting): @@ -419,7 +418,7 @@ def test_query_runs_from_location(self): 'end': 36, 'tokens': u'redistributions'} ] - assert expected == result + assert result == expected def test_query_runs_three_runs(self): idx = index.LicenseIndex(self.get_test_rules('index/bsd')) @@ -436,18 +435,18 @@ def test_query_runs_three_runs(self): ] result = [q.to_dict(brief=True) for q in qry.query_runs] - assert expected == result + assert result == expected def test_QueryRun(self): idx = index.LicenseIndex([Rule(stored_text='redistributions in binary form must redistributions in')]) qry = Query(query_string='redistributions in binary form must redistributions in', idx=idx) qruns = qry.query_runs - assert 1 == len(qruns) + assert len(qruns) == 1 qr = qruns[0] # test result = [idx.tokens_by_tid[tid] for tid in qr.tokens] expected = ['redistributions', 'in', 'binary', 'form', 'must', 'redistributions', 'in'] - assert expected == result + assert result == expected def test_QueryRun_repr(self): idx = index.LicenseIndex([Rule(stored_text='redistributions in binary form must redistributions in')]) @@ -456,10 +455,10 @@ def test_QueryRun_repr(self): qr = qruns[0] # test expected = 'QueryRun(start=0, len=7, start_line=1, end_line=1)' - assert expected == repr(qr) + assert repr(qr) == expected expected = 'QueryRun(start=0, len=7, start_line=1, end_line=1, tokens="redistributions in binary form must
redistributions in")' - assert expected == qr.__repr__(trace_repr=True) + assert qr.__repr__(trace_repr=True) == expected def test_query_runs_text_is_correct(self): test_rules = self.get_test_rules('query/full_text/idx',) @@ -495,7 +494,7 @@ def test_query_runs_text_is_correct(self): software even if advised of the possibility of such damage'''.split(), u'no of'.split(), ] - assert expected == result + assert result == expected def test_query_runs_with_plain_rule(self): rule_text = u'''X11 License @@ -553,8 +552,8 @@ def test_query_runs_with_plain_rule(self): u'system is trademark of x consortium inc' ) }] - assert 214 == len(qry.query_runs[0].tokens) - assert expected == result + assert len(qry.query_runs[0].tokens) == 214 + assert result == expected def test_query_run_has_correct_offset(self): rule_dir = self.get_test_loc('query/runs/rules') @@ -581,7 +580,7 @@ def test_query_run_has_correct_offset(self): } ] - assert expected == result + assert result == expected def test_query_run_and_tokenizing_breaking_works__with_plus_as_expected(self): rule_dir = self.get_test_loc('query/run_breaking/rules') @@ -608,7 +607,7 @@ def test_query_run_and_tokenizing_breaking_works__with_plus_as_expected(self): 'street fifth floor boston ma 02110 1301 usa'} ] - assert expected == result + assert result == expected # check rules token are the same exact set as the set of the last query run txtid = idx.tokens_by_tid @@ -661,7 +660,7 @@ def test_QueryRun_with_all_digit_lines(self): {u'end': 17, u'start': 13, u'tokens': u'1955 724 2 932 234'}, {u'end': 20, u'start': 18, u'tokens': u'694 634 110'}, ] - assert expected == result + assert result == expected assert not any(qr.is_matchable() for qr in qry.query_runs) @@ -710,7 +709,7 @@ def test_query_run_tokens(self): '''.split()) idx = cache.get_index() result = Query(query_string=query_s, idx=idx) - assert 1 == len(result.query_runs) + assert len(result.query_runs) == 1 qr = result.query_runs[0] # NOTE: this is not a token present in any rules or licenses unknown_tokens = ('baridationally',) @@ -738,7 +737,7 @@ def test_query_run_tokens_matchable(self): types h types h h h h h '''.split()) result = Query(query_string=query_s, idx=idx) - assert 1 == len(result.query_runs) + assert len(result.query_runs) == 1 qr = result.query_runs[0] expected_qr0 = u' '.join(u''' 3 unable to create proc entry license gpl description driver author eric @@ -751,15 +750,15 @@ def test_query_run_tokens_matchable(self): linux include asm include asm generic include acpi acpi c posix types 32 h types h types h h h h h '''.split()) - assert expected_qr0 == u' '.join(idx.tokens_by_tid[t] for t in qr.tokens) + assert u' '.join(idx.tokens_by_tid[t] for t in qr.tokens) == expected_qr0 - assert expected_qr0 == u' '.join( - idx.tokens_by_tid[t] for p, t in enumerate(qr.tokens) if p in qr.matchables) + assert u' '.join(idx.tokens_by_tid[t] for p, t in enumerate( + qr.tokens) if p in qr.matchables) == expected_qr0 # only gpl and gnu are in high matchables expected = u'license gpl author gnu gnu' - assert expected == u' '.join( - idx.tokens_by_tid[t] for p, t in enumerate(qr.tokens) if p in qr.high_matchables) + assert u' '.join(idx.tokens_by_tid[t] for p, t in enumerate( + qr.tokens) if p in qr.high_matchables) == expected def test_query_run_for_text_with_long_lines(self): location1 = self.get_test_loc('query/long_lines.txt') diff --git a/tests/licensedcode/test_seq.py b/tests/licensedcode/test_seq.py index e5067a045a5..816ee3a21c6 100644 --- a/tests/licensedcode/test_seq.py +++
b/tests/licensedcode/test_seq.py @@ -144,4 +144,4 @@ def test_find_longest_match_seq(self): b_end = 53 tests = seq.find_longest_match(a, b, a_start, len(a), b_start, b_end, b2j, len_good, matchables) - assert seq.Match(a=357, b=0, size=8) == tests + assert tests == seq.Match(a=357, b=0, size=8) diff --git a/tests/licensedcode/test_tokenize.py b/tests/licensedcode/test_tokenize.py index 725e51f6787..4f8b0924984 100644 --- a/tests/licensedcode/test_tokenize.py +++ b/tests/licensedcode/test_tokenize.py @@ -48,19 +48,19 @@ def test_word_splitter(self): u'modification', u'are', u'permitted'] - assert expected == result + assert result == expected def test_word_splitter_with_trailing_plus(self): text = u'gpl-3.0+' result = list(word_splitter(text)) expected = [u'gpl', u'3', u'0+'] - assert expected == result + assert result == expected def test_word_splitter_with_internal_plus(self): text = u'gpl-+3.0' result = list(word_splitter(text)) expected = [u'gpl', u'3', u'0'] - assert expected == result + assert result == expected def test_query_lines_from_location(self): query_loc = self.get_test_loc('index/queryperfect-mini') @@ -72,7 +72,7 @@ def test_query_lines_from_location(self): u'Always', ] result = [l for _, l in query_lines(location=query_loc)] - assert expected == result + assert result == expected def test_query_lines_from_location_return_a_correct_number_of_lines(self): query_loc = self.get_test_loc('tokenize/correct_lines') @@ -86,7 +86,7 @@ def test_query_lines_from_location_return_a_correct_number_of_lines(self): ', , , sublicense, and/or Software, ,'), (1, u'subject')] result = list(query_lines(location=query_loc)) - assert expected == result + assert result == expected def test_query_lines_from_string(self): query_string = ''' @@ -106,7 +106,7 @@ def test_query_lines_from_string(self): u'', ] result = [l for _, l in query_lines(query_string=query_string)] - assert expected == result + assert result == expected def test_query_lines_complex(self): query_loc = self.get_test_loc('index/querytokens') @@ -128,32 +128,32 @@ def test_query_lines_complex(self): u'Redistributions', ] result = [l for _, l in query_lines(location=query_loc)] - assert expected == result + assert result == expected def test_query_tokenizer_handles_empty_string(self): text = '' result = list(query_tokenizer(text)) - assert [] == result + assert result == [] def test_query_tokenizer_handles_blank_lines(self): text = u' \n\n\t ' result = list(query_tokenizer(text)) - assert [] == result + assert result == [] def test_query_tokenizer_handles_blank_lines2(self): text = ' \n\t ' result = list(query_tokenizer(text)) - assert [] == result + assert result == [] def test_query_tokenizer_handles_empty_lines(self): text = u'\n\n' expected = [] - assert expected == list(query_tokenizer(text)) + assert list(query_tokenizer(text)) == expected def test_query_tokenizer_can_split(self): text = u'abc def \n GHI' result = list(query_tokenizer(text)) - assert [u'abc', u'def', u'ghi'] == result + assert result == [u'abc', u'def', u'ghi'] def test_query_tokenizer(self): text = u'''Redistribution and use in source and binary forms, with or @@ -170,23 +170,23 @@ def test_query_tokenizer(self): conditions are met redistributions of source code must retain the above copyright notice this list of conditions and the following disclaimer'''.split() - assert expected == result + assert result == expected def test_query_tokenizer_behavior1(self): text , expected = 'MODULE_LICENSE("Dual BSD/GPL");', ['module', 'license', 'dual', 'bsd', 'gpl'] - 
assert expected == list(query_tokenizer(text)) + assert list(query_tokenizer(text)) == expected def test_query_tokenizer_behavior2(self): text , expected = 'Dual BSD/GPL', ['dual', 'bsd', 'gpl'] - assert expected == list(query_tokenizer(text)) + assert list(query_tokenizer(text)) == expected def test_query_tokenizer_behavior3(self): text , expected = 'license=Dual BSD/GPL', ['license', 'dual', 'bsd', 'gpl'] - assert expected == list(query_tokenizer(text)) + assert list(query_tokenizer(text)) == expected def test_query_tokenizer_behavior4(self): text , expected = 'license_Dual+BSD-GPL', ['license', 'dual+bsd', 'gpl'] - assert expected == list(query_tokenizer(text)) + assert list(query_tokenizer(text)) == expected def test_query_tokenizer_behavior_from_file(self, regen=False): test_file = self.get_test_loc('tokenize/freertos/gpl-2.0-freertos.RULE') @@ -201,24 +201,24 @@ def test_query_tokenizer_behavior_from_file(self, regen=False): with io.open(expected_file, encoding='utf-8') as exc_test: expected = json.load(exc_test) - assert expected == list(query_tokenizer(text)) + assert list(query_tokenizer(text)) == expected def test_query_tokenizer_can_split_legacy_templates(self): text = u'abc def \n {{temp}} GHI' result = list(query_tokenizer(text)) expected = [u'abc', u'def', u'temp', u'ghi', ] - assert expected == result + assert result == expected def test_query_tokenizer_merges_contiguous_gaps(self): text = u'abc{{temp}}{{xzy}}def' result = list(query_tokenizer(text)) expected = [u'abc', u'temp', u'xzy', u'def'] - assert expected == result + assert result == expected def test_query_tokenizer_handles_empty_legacy_templates(self): text = u'ab{{}}cd' expected = [u'ab', u'cd'] - assert expected == list(query_tokenizer(text)) + assert list(query_tokenizer(text)) == expected def test_query_tokenizer_does_not_throw_exception_for_pystache_templates(self): text = u'''Permission to use, copy, modify, and {{ /or : the @@ -251,7 +251,7 @@ def test_query_tokenizer_handles_unicode_text_correctly(self): test_file = self.get_test_loc('tokenize/unicode/12180.atxt') with io.open(test_file, encoding='utf-8') as test: - assert expected == list(query_tokenizer(test.read())) + assert list(query_tokenizer(test.read())) == expected def test_query_tokenizer_can_handle_long_text(self): expected = [ @@ -279,7 +279,7 @@ def test_query_tokenizer_can_handle_long_text(self): ] test_file = self.get_test_loc('tokenize/unicode/12180.txt') with io.open(test_file, encoding='utf-8') as test: - assert expected == list(query_tokenizer(test.read())) + assert list(query_tokenizer(test.read())) == expected def test_query_tokenizer_does_not_crash_on_unicode_rules_text_1(self): test_file = self.get_test_loc('tokenize/unicode/12290.txt') @@ -308,7 +308,7 @@ def test_query_tokenizer_does_not_crash_on_unicode_rules_text_5(self): def test_query_tokenizer_does_not_crash_with_non_well_formed_legacy_templatized_parts(self): text = u'abcd{{ddd' - assert [u'abcd', u'ddd'] == list(query_tokenizer(text)) + assert list(query_tokenizer(text)) == [u'abcd', u'ddd'] def test_query_tokenizer_can_parse_ill_formed_legacy_template_from_file(self, regen=False): test_file = self.get_test_loc('tokenize/ill_formed_template/text.txt') @@ -323,7 +323,7 @@ def test_query_tokenizer_can_parse_ill_formed_legacy_template_from_file(self, re with io.open(expected_file, encoding='utf-8') as ex: expected = json.load(ex) - assert expected == result + assert result == expected def test_tokenizers_regex_do_not_choke_on_some_text(self): # somehow this text was making the 
regex choke. @@ -352,7 +352,7 @@ def test_query_tokenizer_handles_rarer_unicode_codepoints(self): text = '♡ Copying Art is an act of love. Love is not subject to law.' expected = [u'copying', u'art', u'is', u'an', u'act', u'of', u'love', u'love', u'is', u'not', u'subject', u'to', u'law'] - assert expected == list(query_tokenizer(text)) + assert list(query_tokenizer(text)) == expected def test_query_lines_on_html_like_texts(self, regen=False): test_file = self.get_test_loc('tokenize/htmlish.txt') @@ -368,7 +368,7 @@ def test_query_lines_on_html_like_texts(self, regen=False): with io.open(expected_file, encoding='utf-8') as exc_test: expected = json.load(exc_test) - assert expected == result + assert result == expected def test_query_lines_on_html_like_texts_2(self, regen=False): test_file = self.get_test_loc('tokenize/htmlish.html') @@ -384,7 +384,7 @@ def test_query_lines_on_html_like_texts_2(self, regen=False): with io.open(expected_file, encoding='utf-8') as exc_test: expected = json.load(exc_test) - assert expected == result + assert result == expected def test_query_tokenizer_on_html_like_texts(self, regen=False): test_file = self.get_test_loc('tokenize/htmlish.txt') @@ -403,7 +403,7 @@ def test_query_tokenizer_on_html_like_texts(self, regen=False): with io.open(expected_file, encoding='utf-8') as exc_test: expected = json.load(exc_test) - assert expected == result + assert result == expected def test_query_tokenizer_lines_on_html_like_texts_2(self, regen=False): test_file = self.get_test_loc('tokenize/htmlish.html') @@ -422,7 +422,7 @@ def test_query_tokenizer_lines_on_html_like_texts_2(self, regen=False): with io.open(expected_file, encoding='utf-8') as exc_test: expected = json.load(exc_test) - assert expected == result + assert result == expected class TestNgrams(FileBasedTesting): @@ -442,7 +442,7 @@ def test_ngrams(self): ('source', 'and', 'binary', 'are'), ('and', 'binary', 'are', 'permitted.') ] - assert expected == result + assert result == expected def test_ngrams_with_None(self): tokens = ['Redistribution', 'and', 'use', None, 'in', 'source', 'and', 'binary', 'are', None] @@ -455,7 +455,7 @@ def test_ngrams_with_None(self): ('in', 'source', 'and', 'binary'), ('source', 'and', 'binary', 'are'), ('and', 'binary', 'are', None)] - assert expected == result + assert result == expected def test_ngrams_with_None_length_three(self): tokens = ['Redistribution', 'and', 'use', None, 'in', 'source', 'and', 'binary', 'are', None] @@ -469,7 +469,7 @@ def test_ngrams_with_None_length_three(self): ('source', 'and', 'binary'), ('and', 'binary', 'are'), ('binary', 'are', None)] - assert expected == result + assert result == expected def test_ngrams2(self): tokens = ''' @@ -485,12 +485,12 @@ def test_ngrams2(self): ('source', 'and', 'binary', 'are'), ('and', 'binary', 'are', 'permitted.')] - assert expected == result + assert result == expected def test_select_ngrams_with_unicode_inputs(self): result = list(select_ngrams(x for x in [('b', 'ä', 'c'), ('ä', 'ä', 'c'), ('e', 'ä', 'c'), ('b', 'f', 'ä'), ('g', 'c', 'd')])) expected = [('b', 'ä', 'c'), ('ä', 'ä', 'c'), ('e', 'ä', 'c'), ('b', 'f', 'ä'), ('g', 'c', 'd')] - assert expected == result + assert result == expected class MatchedTextTokenizer(FileBasedTesting): @@ -537,11 +537,11 @@ def test_tokens_and_non_tokens_yield_properly_all_texts(self): {'punct': None, 'token': 'İrəli'}, {'punct': ' .\t\n\n \r', 'token': None} ] - assert expected == result + assert result == expected result_as_text = u''.join(itertools.chain.from_iterable( [v for v 
in m.groupdict().values() if v] for m in tokens_and_non_tokens(text))) - assert text == result_as_text + assert result_as_text == text def test_matched_query_text_tokenizer_works_with_spdx_ids(self): text = u''' * SPDX-License-Identifier: GPL-2.0+ BSD-3-Clause @@ -602,11 +602,11 @@ def test_matched_query_text_tokenizer_works_with_spdx_ids(self): (False, u')\n ') ] - assert expected == result + assert result == expected result_as_text = u''.join(itertools.chain.from_iterable( [v for v in m.groupdict().values() if v] for m in tokens_and_non_tokens(text))) - assert text == result_as_text + assert result_as_text == text def test_matched_query_text_tokenizer_and_query_tokenizer_should_yield_the_same_texts(self): text = u'''Redistribution+ ;and use in! + 2003 source and +binary forms, diff --git a/tests/licensedcode/test_zzzz_cache.py b/tests/licensedcode/test_zzzz_cache.py index f3cb768562b..a090990a1fe 100644 --- a/tests/licensedcode/test_zzzz_cache.py +++ b/tests/licensedcode/test_zzzz_cache.py @@ -35,29 +35,29 @@ def test_tree_checksum_ignores_some_files_and_directories(self): fileutils.create_dir(os.path.join(test_dir, 'some dir')) after = cache.tree_checksum(test_dir) - assert before == after + assert after == before with open(os.path.join(test_dir, 'some.py'), 'w') as py: py.write(' ') after = cache.tree_checksum(test_dir) - assert before != after + assert after != before before = after with open(os.path.join(test_dir, 'some.LICENSE'), 'w') as f: f.write(' ') after = cache.tree_checksum(test_dir) - assert before != after + assert after != before before = after with open(os.path.join(test_dir, 'some.LICENSE~'), 'w') as f: f.write(' ') after = cache.tree_checksum(test_dir) - assert before == after + assert after == before with open(os.path.join(test_dir, 'some.LICENSE.swp'), 'w') as f: f.write(' ') after = cache.tree_checksum(test_dir) - assert before == after + assert after == before def test_tree_checksum_is_different_when_file_is_added(self): test_dir = self.get_test_loc('cache/tree', copy=True) @@ -66,13 +66,13 @@ def test_tree_checksum_is_different_when_file_is_added(self): with open(os.path.join(test_dir, 'some.py'), 'w') as py: py.write(' ') after = cache.tree_checksum(test_dir) - assert before != after + assert after != before before = after with open(os.path.join(test_dir, 'some.LICENSE'), 'w') as f: f.write(' ') after = cache.tree_checksum(test_dir) - assert before != after + assert after != before def test_tree_checksum_is_different_when_file_is_changed(self): test_dir = self.get_test_loc('cache/tree', copy=True) @@ -84,7 +84,7 @@ def test_tree_checksum_is_different_when_file_is_changed(self): with open(os.path.join(test_dir, 'some.py'), 'w') as py: py.write(' asas') after = cache.tree_checksum(test_dir) - assert before != after + assert after != before def test_tree_checksum_is_different_when_file_is_removed(self): test_dir = self.get_test_loc('cache/tree', copy=True) @@ -96,7 +96,7 @@ def test_tree_checksum_is_different_when_file_is_removed(self): fileutils.delete(new_file) after = cache.tree_checksum(test_dir) - assert before != after + assert after != before def test_build_index(self): # note: this is a rather complex test because caching involves some globals @@ -143,8 +143,8 @@ def test_build_index(self): rules_data_dir=rules_data_dir, ) - assert tree_before == open(checksum_file).read() - assert idx_checksum_before == hash.sha1(cache_file) + assert open(checksum_file).read() == tree_before + assert hash.sha1(cache_file) == idx_checksum_before # now add some file in the 
source tree new_file = os.path.join(tree_base_dir, 'some file') @@ -162,8 +162,8 @@ def test_build_index(self): licenses_data_dir=licenses_data_dir, rules_data_dir=rules_data_dir, ) - assert tree_before == open(checksum_file).read() - assert idx_checksum_before == hash.sha1(cache_file) + assert open(checksum_file).read() == tree_before + assert hash.sha1(cache_file) == idx_checksum_before # when check_consistency is True, the index is rebuilt when new # files are added @@ -176,7 +176,7 @@ def test_build_index(self): licenses_data_dir=licenses_data_dir, rules_data_dir=rules_data_dir, ) - assert tree_before != open(checksum_file).read() + assert open(checksum_file).read() != tree_before # now add some ignored file in the source tree tree_before = open(checksum_file).read() @@ -197,8 +197,8 @@ def test_build_index(self): rules_data_dir=rules_data_dir, ) - assert tree_before == open(checksum_file).read() - assert idx_checksum_before == hash.sha1(cache_file) + assert open(checksum_file).read() == tree_before + assert hash.sha1(cache_file) == idx_checksum_before # if the treechecksum file dies, the index is not rebuilt if # check_consistency is False. and no new checksum is created @@ -231,7 +231,7 @@ def test_build_index(self): rules_data_dir=rules_data_dir, ) - assert tree_before == open(checksum_file).read() + assert open(checksum_file).read() == tree_before # if the index cache file dies the index is rebuilt fileutils.delete(cache_file) @@ -248,7 +248,7 @@ def test_build_index(self): # load index, forced from file idx2 = cache.load_index(cache_file) - assert set(idx1.dictionary.keys()) == set(idx2.dictionary.keys()) + assert set(idx2.dictionary.keys()) == set(idx1.dictionary.keys()) # reset global caches cache._LICENSE_SYMBOLS_BY_SPDX_KEY = {} @@ -267,13 +267,13 @@ def test_load_index_with_corrupted_index(self): assert 'Failed to load license cache' in str(ex) def test_get_unknown_spdx_symbol(self): - assert 'unknown-spdx' == cache.get_unknown_spdx_symbol().key + assert cache.get_unknown_spdx_symbol().key == 'unknown-spdx' def test_get_unknown_spdx_symbol_from_defined_db(self): test_dir = self.get_test_loc('spdx/db-unknown') from licensedcode.models import load_licenses test_licenses = load_licenses(test_dir) - assert 'unknown-spdx' == cache.get_unknown_spdx_symbol(_test_licenses=test_licenses).key + assert cache.get_unknown_spdx_symbol(_test_licenses=test_licenses).key == 'unknown-spdx' def test_get_spdx_symbols_from_dir(self): test_dir = self.get_test_loc('spdx/db') @@ -290,7 +290,7 @@ def test_get_spdx_symbols_from_dir(self): u'xskat': u'xskat' } - assert expected == result + assert result == expected def test_get_spdx_symbols(self): result = cache.get_spdx_symbols() diff --git a/tests/packagedcode/packages_test_utils.py b/tests/packagedcode/packages_test_utils.py index 3af735de125..fa84b9d1e5e 100644 --- a/tests/packagedcode/packages_test_utils.py +++ b/tests/packagedcode/packages_test_utils.py @@ -40,9 +40,9 @@ def check_package(self, package, expected_loc, regen=False): expected = json.load(ex, encoding='utf-8') try: - assert expected == results + assert results == expected except AssertionError: - assert json.dumps(expected, indent=2) == json.dumps(results, indent=2) + assert json.dumps(results, indent=2) == json.dumps(expected, indent=2) def check_packages(self, packages, expected_loc, regen=False): """ @@ -69,7 +69,7 @@ def check_packages(self, packages, expected_loc, regen=False): expected_packages = json.load(ex, encoding='utf-8') for expected_package, result in 
zip(expected_packages, results): - assert expected_package == result + assert result == expected_package def check_result_equals_expected_json(result, expected, regen=False): @@ -84,4 +84,4 @@ def check_result_equals_expected_json(result, expected, regen=False): with open(expected) as ex: expected = json.loads(ex.read()) - assert expected == result + assert result == expected diff --git a/tests/packagedcode/test_build.py b/tests/packagedcode/test_build.py index eb5ebc1de4c..450095616a4 100644 --- a/tests/packagedcode/test_build.py +++ b/tests/packagedcode/test_build.py @@ -43,7 +43,7 @@ def test_build_get_package_resources(self): 'get_package_resources/file1', ] results = [r.path for r in build.BaseBuildManifestPackage.get_package_resources(root, codebase)] - assert expected == results + assert results == expected def test_BazelPackage_recognize(self): test_file = self.get_test_loc('bazel/parse/BUILD') @@ -88,4 +88,4 @@ def compare_package_results(expected, result): e = expected_package.to_dict() e.pop('root_path') expected_packages.append(e) - assert expected_packages == result_packages + assert result_packages == expected_packages diff --git a/tests/packagedcode/test_conda.py b/tests/packagedcode/test_conda.py index db4d08530b4..24dc9343679 100644 --- a/tests/packagedcode/test_conda.py +++ b/tests/packagedcode/test_conda.py @@ -20,12 +20,12 @@ class TestConda(PackageTester): def test_parse_get_varialble(self): test_file = self.get_test_loc('conda/meta.yaml') results = conda.get_variables(test_file) - assert dict([(u'version', u'0.45.0'), (u'sha256', u'bc7512f2eef785b037d836f4cc6faded457ac277f75c6e34eccd12da7c85258f')]) == results + assert results == dict([(u'version', u'0.45.0'), (u'sha256', u'bc7512f2eef785b037d836f4cc6faded457ac277f75c6e34eccd12da7c85258f')]) def test_get_yaml_data(self): test_file = self.get_test_loc('conda/meta.yaml') results = conda.get_yaml_data(test_file) - assert (u'package', dict([(u'name', u'abeona'), (u'version', u'0.45.0')])) == list(results.items())[0] + assert list(results.items())[0] == (u'package', dict([(u'name', u'abeona'), (u'version', u'0.45.0')])) def test_parse(self): test_file = self.get_test_loc('conda/meta.yaml') @@ -39,4 +39,4 @@ def test_root_dir(self): codebase = Codebase(test_dir) manifest_resource = codebase.get_resource_from_path(test_file, absolute=True) proot = conda.CondaPackage.get_package_root(manifest_resource, codebase) - assert test_dir == proot.location + assert proot.location == test_dir diff --git a/tests/packagedcode/test_debian.py b/tests/packagedcode/test_debian.py index 5403b20fa76..fcac4da0083 100644 --- a/tests/packagedcode/test_debian.py +++ b/tests/packagedcode/test_debian.py @@ -38,7 +38,7 @@ def test_basic_rootfs_with_licenses_and_copyrights(self): def test_get_installed_packages_should_not_fail_on_rootfs_without_installed_debian_packages(self): test_rootfs = self.get_temp_dir() result = list(debian.get_installed_packages(test_rootfs)) - assert [] == result + assert result == [] class TestDebian(PackageTester): @@ -47,7 +47,7 @@ class TestDebian(PackageTester): def test_parse_status_file_not_a_status_file(self): test_file = self.get_test_loc('debian/not-a-status-file') test_packages = list(debian.parse_status_file(test_file)) - assert [] == test_packages + assert test_packages == [] def test_parse_status_file_non_existing_file(self): test_file = os.path.join(self.get_test_loc('debian'), 'foobarbaz') @@ -77,7 +77,7 @@ def test_parse_end_to_end(self): test_file = os.path.join(test_info_dir, 'status') packages = 
list(debian.parse_status_file(test_file, distro='ubuntu')) - assert 1 == len(packages) + assert len(packages) == 1 test_package = packages[0] @@ -90,8 +90,8 @@ def test_parse_end_to_end(self): resources = test_package.get_list_of_installed_files(test_info_dir) - assert 4 == len(resources) - assert expected == resources + assert len(resources) == 4 + assert resources == expected def test_get_installed_packages_ubuntu_with_missing_md5sums(self): test_root_dir = self.get_test_loc('debian/ubuntu-var-lib-dpkg/') @@ -114,7 +114,7 @@ def test_missing_md5sum_file(self): qualifiers={'arch':'amd64'} ) - assert [] == test_pkg.get_list_of_installed_files(test_info_dir) + assert test_pkg.get_list_of_installed_files(test_info_dir) == [] @skipIf(on_windows, 'File names cannot contain colons on Windows') def test_multi_arch_is_same(self): @@ -138,8 +138,8 @@ def test_multi_arch_is_same(self): results = test_pkg.get_list_of_installed_files(test_info_dir) - assert 6 == len(results) - assert expected == results + assert len(results) == 6 + assert results == expected def test_multi_arch_is_foreign(self): test_info_dir = self.get_test_loc('debian/foreign-multi-arch') @@ -169,8 +169,8 @@ def test_multi_arch_is_foreign(self): results = test_pkg.get_list_of_installed_files(test_info_dir) - assert 14 == len(results) - assert expected == results + assert len(results) == 14 + assert results == expected def test_multi_arch_is_missing(self): test_info_dir = self.get_test_loc('debian/missing-multi-arch') @@ -189,5 +189,5 @@ def test_multi_arch_is_missing(self): ] results = test_pkg.get_list_of_installed_files(test_info_dir) - assert 5 == len(results) - assert expected == results + assert len(results) == 5 + assert results == expected diff --git a/tests/packagedcode/test_debian_copyright.py b/tests/packagedcode/test_debian_copyright.py index 0e0bbe5227b..9fdd5a613af 100644 --- a/tests/packagedcode/test_debian_copyright.py +++ b/tests/packagedcode/test_debian_copyright.py @@ -39,7 +39,7 @@ def check_expected(test_loc, expected_loc, regen=False): expected ]) - assert expected == result + assert result == expected def relative_walk(dir_path): diff --git a/tests/packagedcode/test_gemfile_lock.py b/tests/packagedcode/test_gemfile_lock.py index 04fca912fbc..5580aecef0e 100644 --- a/tests/packagedcode/test_gemfile_lock.py +++ b/tests/packagedcode/test_gemfile_lock.py @@ -38,9 +38,9 @@ def check_results(self, results, expected_loc, regen=False): expected = json.load(ex) try: - assert expected == results + assert results == expected except AssertionError: - assert json.dumps(expected, indent=2) == json.dumps(results, indent=2) + assert json.dumps(results, indent=2) == json.dumps(expected, indent=2) def check_gemfile_lock(self, test_file, expected_loc, regen=False): test_file = self.get_test_loc(test_file) @@ -97,7 +97,7 @@ def test_get_options(self): (None, None) ] results = [gemfile_lock.get_option(t) for t in test] - assert expected == results + assert results == expected def test_NAME_VERSION_re(self): import re @@ -139,7 +139,7 @@ def test_NAME_VERSION_re(self): results = [(nv(x).group('name'), nv(x).group('version'),) for x in test] - assert expected == results + assert results == expected def test_DEPS_re(self): test = '''DEPENDENCIES @@ -181,7 +181,7 @@ def test_DEPS_re(self): results.append((name, version, pinned,)) else: results.append(None) - assert expected == results + assert results == expected def test_SPEC_DEPS_re(self): test = ''' specs: @@ -207,7 +207,7 @@ def test_SPEC_DEPS_re(self): nv = gemfile_lock.SPEC_DEPS 
results = [(nv(x).group('name'), nv(x).group('version'),) for x in test if nv(x)] - assert expected == results + assert results == expected def test_SPEC_SUB_DEPS_re(self): test = ''' specs: @@ -238,7 +238,7 @@ def test_SPEC_SUB_DEPS_re(self): nv = gemfile_lock.SPEC_SUB_DEPS results = [(nv(x).group('name'), nv(x).group('version'),) for x in test if nv(x)] - assert expected == results + assert results == expected def test_Gem_as_nv_tree(self): Gem = gemfile_lock.Gem @@ -309,14 +309,14 @@ def test_Gem_flatten(self): (g, b), ]) results = sorted(a.flatten()) - assert expected == results + assert results == expected def test_Gem_as_nv_tree_with_no_deps(self): Gem = gemfile_lock.Gem a = Gem('a', '1') expected = {('a', '1'): {}} results = a.as_nv_tree() - assert expected == results + assert results == expected def test_Gem_to_dict(self): @@ -343,7 +343,7 @@ def test_Gem_to_dict(self): ] results = a.to_dict() - assert expected == list(results.items()) + assert list(results.items()) == expected def test_GemfileLockParser_can_parse_a_flat_list_of_deps(self): test_file = 'gemfile_lock/as_deps/Gemfile.lock' diff --git a/tests/packagedcode/test_godeps.py b/tests/packagedcode/test_godeps.py index 56e74ee989a..d4311d35e8b 100644 --- a/tests/packagedcode/test_godeps.py +++ b/tests/packagedcode/test_godeps.py @@ -54,7 +54,7 @@ def test_parse_basic(self): gd = godeps.Godep() gd.loads(test) results = gd.to_dict() - assert expected == results + assert results == expected def check_package(self, test_file, expected_file, regen=False): test_loc = self.get_test_loc(test_file) @@ -66,7 +66,7 @@ def check_package(self, test_file, expected_file, regen=False): json.dump(results, ex, indent=2) with io.open(expected_loc, encoding='utf-8') as ex: expected = json.load(ex) - assert sorted(expected.items()) == sorted(results.items()) + assert sorted(results.items()) == sorted(expected.items()) def test_godeps_godeps_godeps_json_comments(self): self.check_package( diff --git a/tests/packagedcode/test_haxe.py b/tests/packagedcode/test_haxe.py index bd40e34bfd0..d1d2e402ba4 100644 --- a/tests/packagedcode/test_haxe.py +++ b/tests/packagedcode/test_haxe.py @@ -47,4 +47,4 @@ def test_root_dir(self): codebase = Codebase(test_dir) manifest_resource = codebase.get_resource_from_path(test_file, absolute=True) proot = haxe.HaxePackage.get_package_root(manifest_resource, codebase) - assert test_dir == proot.location + assert proot.location == test_dir diff --git a/tests/packagedcode/test_jar_manifest.py b/tests/packagedcode/test_jar_manifest.py index 7209c1cedf2..e725f189a87 100644 --- a/tests/packagedcode/test_jar_manifest.py +++ b/tests/packagedcode/test_jar_manifest.py @@ -39,7 +39,7 @@ def check_parse_manifest(self, test_manifest, regen=False): with io.open(expected_manifest_loc, encoding='utf-8') as ex: expected = json.load(ex) - assert json.dumps(expected) == json.dumps(parsed_manifest) + assert json.dumps(parsed_manifest) == json.dumps(expected) def check_get_normalized_package_data(self, test_manifest, regen=False): """ @@ -60,7 +60,7 @@ def check_get_normalized_package_data(self, test_manifest, regen=False): with io.open(expected_json_loc, 'rb') as ex: expected = json.load(ex, encoding='utf-8') - assert json.dumps(expected) == json.dumps(package) + assert json.dumps(package) == json.dumps(expected) class TestMavenMisc(BaseParseManifestCase): diff --git a/tests/packagedcode/test_licensing.py b/tests/packagedcode/test_licensing.py index 86946c89edf..219544f5635 100644 --- a/tests/packagedcode/test_licensing.py +++ 
b/tests/packagedcode/test_licensing.py @@ -14,9 +14,9 @@ class TestLicensing(TestCase): def test_get_normalized_expression(self): - assert 'mit' == get_normalized_expression('mit') - assert 'apache-2.0 AND unknown' == get_normalized_expression('mit or asasa or Apache-2.0') - assert 'apache-2.0 AND unknown' == get_normalized_expression('mit or asasa or Apache-2.0') - assert 'mit OR apache-2.0' == get_normalized_expression('mit asasa or Apache-2.0') + assert get_normalized_expression('mit') == 'mit' + assert get_normalized_expression('mit or asasa or Apache-2.0') == 'apache-2.0 AND unknown' + assert get_normalized_expression('mit or asasa or Apache-2.0') == 'apache-2.0 AND unknown' + assert get_normalized_expression('mit asasa or Apache-2.0') == 'mit OR apache-2.0' assert get_normalized_expression('') is None assert get_normalized_expression(None) is None diff --git a/tests/packagedcode/test_maven.py b/tests/packagedcode/test_maven.py index 49d55c06fa6..9d68e467ec5 100644 --- a/tests/packagedcode/test_maven.py +++ b/tests/packagedcode/test_maven.py @@ -66,12 +66,12 @@ def compare_results(results, test_pom_loc, expected_json_loc, regen=False): results_dump = json.dumps(results, indent=2) expected_dump = json.dumps(expected, indent=2) try: - assert expected_dump == results_dump + assert results_dump == expected_dump except AssertionError: test_pom_loc = 'file://' + test_pom_loc expected_json_loc = 'file://' + expected_json_loc expected = [test_pom_loc, expected_json_loc, expected_dump] - assert '\n'.join(expected) == results_dump + assert results_dump == '\n'.join(expected) def parse_pom(location=None, text=None, check_is_pom=False): @@ -119,15 +119,15 @@ class TestMavenMisc(BaseMavenCase): def test_parse_pom_non_pom(self): test_pom_loc = self.get_test_loc('maven_misc/non-maven.pom') results = parse_pom(location=test_pom_loc, check_is_pom=True) - assert {} == results + assert results == {} self.check_parse_pom(test_pom_loc, regen=False) def test_MavenPom_simple_creation(self): test_loc = self.get_test_loc('maven_misc/mini-pom.xml') pom = maven.MavenPom(test_loc) - assert 'activemq-camel' == pom.artifact_id + assert pom.artifact_id == 'activemq-camel' # note: there has been no parent resolving yet - assert None == pom.group_id + assert pom.group_id is None def test_pom_dependencies(self): test_loc = self.get_test_loc('maven2/activemq-camel-pom.xml') @@ -153,7 +153,7 @@ def test_pom_dependencies(self): expected = [(s, sorted(v)) for s, v in expected] results = [(s, sorted(v)) for s, v in pom.dependencies.items()] - assert expected == results + assert results == expected def test_pom_issue_management_properties_are_resolved(self): test_loc = self.get_test_loc('maven2/xml-format-maven-plugin-3.0.6.pom') @@ -164,7 +164,7 @@ def test_pom_issue_management_properties_are_resolved(self): (u'url', 'https://github.com/acegi/xml-format-maven-plugin/issues')] ) result = pom.issue_management - assert expected == result + assert result == expected def test_pom_dependencies_are_resolved(self): test_loc = self.get_test_loc('maven2/activemq-camel-pom.xml') @@ -190,7 +190,7 @@ def test_pom_dependencies_are_resolved(self): expected = [(s, sorted(v)) for s, v in expected] results = [(s, sorted(v)) for s, v in pom.dependencies.items()] - assert expected == results + assert results == expected def test_parse_to_package_base(self): test_file = self.get_test_loc('maven_misc/spring-beans-4.2.2.RELEASE.pom.xml') @@ -205,7 +205,7 @@ def test_parse_to_package_then_back(self): test_file =
self.get_test_loc('maven_misc/spring-beans-4.2.2.RELEASE.pom.xml') package = maven.parse(test_file) package2 = maven.MavenPomPackage(**package.to_dict(exclude_properties=True)) - assert package.to_dict().items() == package2.to_dict().items() + assert package2.to_dict().items() == package.to_dict().items() def test_package_root_is_properly_returned_for_metainf_poms(self): from packagedcode.plugin_package import PackageScanner @@ -217,7 +217,7 @@ def test_package_root_is_properly_returned_for_metainf_poms(self): manifest_resource.packages.append(packages[0].to_dict()) manifest_resource.save(codebase) proot = maven.MavenPomPackage.get_package_root(manifest_resource, codebase) - assert 'activiti-image-generator-7-201802-EA-sources.jar-extract' == proot.name + assert proot.name == 'activiti-image-generator-7-201802-EA-sources.jar-extract' def test_package_dependency_not_missing(self): test_file = self.get_test_loc('maven2/log4j-pom.xml') @@ -232,21 +232,21 @@ def test_resolve_properties(self): value = '${groupId}.mycomponent' expected = 'org.apache.mycomponent' test = maven.MavenPom._replace_props(value, properties) - assert expected == test + assert test == expected def test_resolve_properties_with_expression(self): properties = {'groupId': 'org.apache'} value = '${groupId.substring(4)}.mycomponent' expected = 'apache.mycomponent' test = maven.MavenPom._replace_props(value, properties) - assert expected == test + assert test == expected def test_resolve_properties_with_substring_expression(self): properties = {'groupId': 'org.apache'} value = '${groupId.substring(0,3)}.mycomponent' expected = 'org.mycomponent' test = maven.MavenPom._replace_props(value, properties) - assert expected == test + assert test == expected def test_get_properties(self): test_loc = self.get_test_loc('maven2_props/multiple/pom.xml') @@ -282,7 +282,7 @@ def test_get_properties(self): 'pkgVersion': '1.4', } - assert expected == test + assert test == expected def test_get_properties_single(self): test_loc = self.get_test_loc('maven2_props/single/pom.xml') @@ -300,7 +300,7 @@ def test_get_properties_single(self): 'project.version': None, 'version': None } - assert expected == test + assert test == expected def test_get_properties_advanced(self): test_loc = self.get_test_loc('maven2_props/xml-format-maven-plugin-3.0.6.pom') @@ -333,7 +333,7 @@ def test_get_properties_advanced(self): 'project.version': '3.0.6', 'version': '3.0.6' } - assert expected == test + assert test == expected def test_parse_can_run_without_pom_check(self): test_loc = self.get_test_loc('maven_misc/ant-1.6.5.maven') @@ -346,7 +346,7 @@ def test_parse_will_load_extra_pom_properties_if_file_present(self): # there is a file at maven2_props/props_file/activiti-image-generator/pom.properties test_loc = self.get_test_loc('maven2_props/props_file/activiti-image-generator/pom.xml') pom = maven.parse(test_loc, check_is_pom=False) - assert 'org.activiti' == pom.namespace + assert pom.namespace == 'org.activiti' class TestMavenComputeNormalizedLicense(testcase.FileBasedTesting): @@ -359,7 +359,7 @@ def test_compute_normalized_license_two_names_only(self): ] result = maven.compute_normalized_license(declared_license) expected = 'apache-2.0 AND mit' - assert expected == result + assert result == expected def test_compute_normalized_license_tree_nodes(self): declared_license = [ @@ -368,7 +368,7 @@ def test_compute_normalized_license_tree_nodes(self): ] result = maven.compute_normalized_license(declared_license) expected = 'apache-2.0 AND mit' - assert expected == 
result + assert result == expected def test_compute_normalized_license_with_unknown_url(self): declared_license = [ @@ -377,7 +377,7 @@ def test_compute_normalized_license_with_unknown_url(self): ] result = maven.compute_normalized_license(declared_license) expected = 'apache-2.0 AND mit' - assert expected == result + assert result == expected def test_compute_normalized_license_with_unknown_url_known_comments(self): declared_license = [ @@ -386,7 +386,7 @@ def test_compute_normalized_license_with_unknown_url_known_comments(self): ] result = maven.compute_normalized_license(declared_license) expected = 'apache-2.0 AND mit' - assert expected == result + assert result == expected def test_compute_normalized_license_with_unknown_url_unknown_comments(self): declared_license = [ @@ -395,7 +395,7 @@ def test_compute_normalized_license_with_unknown_url_unknown_comments(self): ] result = maven.compute_normalized_license(declared_license) expected = 'apache-2.0 AND mit' - assert expected == result + assert result == expected def test_compute_normalized_license_unknown_name(self): declared_license = [ @@ -404,7 +404,7 @@ def test_compute_normalized_license_unknown_name(self): ] result = maven.compute_normalized_license(declared_license) expected = '(unknown AND apache-2.0) AND mit' - assert expected == result + assert result == expected def test_compute_normalized_license_same_name_and_url(self): declared_license = [ @@ -413,7 +413,7 @@ def test_compute_normalized_license_same_name_and_url(self): ] result = maven.compute_normalized_license(declared_license) expected = 'apache-2.0 AND mit' - assert expected == result + assert result == expected def test_compute_normalized_license_same_name_url_comments(self): declared_license = [ @@ -422,7 +422,7 @@ def test_compute_normalized_license_same_name_url_comments(self): ] result = maven.compute_normalized_license(declared_license) expected = 'apache-2.0 AND mit' - assert expected == result + assert result == expected def test_compute_normalized_license_with_url_invalid(self): declared_license = [ @@ -430,7 +430,7 @@ def test_compute_normalized_license_with_url_invalid(self): ] result = maven.compute_normalized_license(declared_license) expected = 'mit' - assert expected == result + assert result == expected def test_compute_normalized_license_with_duplicated_license(self): declared_license = [ @@ -439,7 +439,7 @@ def test_compute_normalized_license_with_duplicated_license(self): ] result = maven.compute_normalized_license(declared_license) expected = 'lgpl-2.0-plus' - assert expected == result + assert result == expected def relative_walk(dir_path): diff --git a/tests/packagedcode/test_nevra.py b/tests/packagedcode/test_nevra.py index 0766ca15fdd..cdd07ab64b6 100644 --- a/tests/packagedcode/test_nevra.py +++ b/tests/packagedcode/test_nevra.py @@ -15,39 +15,39 @@ class TestNevra(): def test_rpm_details_cups(self): expected1 = (None, 'cups', '1.1.17', '13.3.29', 'src') output1 = nevra.from_name('cups-1.1.17-13.3.29.src') - assert expected1 == output1 + assert output1 == expected1 def test_rpm_details_imagemagick(self): expected2 = (None, 'ImageMagick-c++-devel', '6.0.7.1', '14', 'sparc') output2 = nevra.from_name('ImageMagick-c++-devel-6.0.7.1-14.sparc') - assert expected2 == output2 + assert output2 == expected2 def test_rpm_details_flash_player(self): expected3 = (None, 'flash-player', '11.0.1.152', '2.1.1', 'nosrc') output3 = nevra.from_name('flash-player-11.0.1.152-2.1.1.nosrc') - assert expected3 == output3 + assert output3 == expected3 def 
test_rpm_details_firmware(self): expected4 = (None, 'FirmwareUpdateKit', '1.6', '6.1.2', 'src') output4 = nevra.from_name('FirmwareUpdateKit-1.6-6.1.2.src') - assert expected4 == output4 + assert output4 == expected4 def test_rpm_details_2048(self): expected5 = (None, '2048-cli', '0.9', '4.git20141214.723738c.el6', 'src') output5 = nevra.from_name('2048-cli-0.9-4.git20141214.723738c.el6.src') - assert expected5 == output5 + assert output5 == expected5 def test_rpm_details_barebones(self): expected6 = (None, 'BareBonesBrowserLaunch', '3.1', '1.el6', 'src') output6 = nevra.from_name('BareBonesBrowserLaunch-3.1-1.el6.src') - assert expected6 == output6 + assert output6 == expected6 def test_rpm_details_imagemagickcpp(self): expected7 = (None, 'ImageMagick-c++', '5.5.6', '15', 'i386') output7 = nevra.from_name('ImageMagick-c++-5.5.6-15.i386.rpm') - assert expected7 == output7 + assert output7 == expected7 def test_rpm_details_xfree(self): expected8 = (None, 'XFree86-ISO8859-9-75dpi-fonts', '4.3.0', '97.EL', 'x86_64') output8 = nevra.from_name('XFree86-ISO8859-9-75dpi-fonts-4.3.0-97.EL.x86_64.rpm') - assert expected8 == output8 + assert output8 == expected8 diff --git a/tests/packagedcode/test_npm.py b/tests/packagedcode/test_npm.py index 2858f4cb727..abfb631d15e 100644 --- a/tests/packagedcode/test_npm.py +++ b/tests/packagedcode/test_npm.py @@ -19,41 +19,41 @@ class TestNpm(PackageTester): def test_parse_person(self): test = 'Isaac Z. Schlueter <i@izs.me> (http://blog.izs.me)' - assert ('Isaac Z. Schlueter', 'i@izs.me' , 'http://blog.izs.me') == npm.parse_person(test) + assert npm.parse_person(test) == ('Isaac Z. Schlueter', 'i@izs.me' , 'http://blog.izs.me') def test_parse_person2(self): test = 'Isaac Z. Schlueter <i@izs.me>' - assert ('Isaac Z. Schlueter', 'i@izs.me' , None) == npm.parse_person(test) + assert npm.parse_person(test) == ('Isaac Z. Schlueter', 'i@izs.me' , None) def test_parse_person3(self): test = 'Isaac Z. Schlueter (http://blog.izs.me)' - assert ('Isaac Z. Schlueter', None , 'http://blog.izs.me') == npm.parse_person(test) + assert npm.parse_person(test) == ('Isaac Z. Schlueter', None , 'http://blog.izs.me') def test_parse_person4(self): test = 'Isaac Z. Schlueter' - assert ('Isaac Z. Schlueter', None , None) == npm.parse_person(test) + assert npm.parse_person(test) == ('Isaac Z. Schlueter', None , None) def test_parse_person5(self): test = '<i@izs.me> (http://blog.izs.me)' - assert (None, u'i@izs.me', u'http://blog.izs.me') == npm.parse_person(test) + assert npm.parse_person(test) == (None, u'i@izs.me', u'http://blog.izs.me') def test_parse_person_dict(self): test = {'name': 'Isaac Z. Schlueter'} - assert ('Isaac Z. Schlueter', None, None) == npm.parse_person(test) + assert npm.parse_person(test) == ('Isaac Z. Schlueter', None, None) def test_parse_person_dict2(self): test = {'email': 'me@this.com'} - assert (None, 'me@this.com', None) == npm.parse_person(test) + assert npm.parse_person(test) == (None, 'me@this.com', None) def test_parse_person_dict3(self): test = {'url': 'http://example.com'} - assert (None, None, 'http://example.com') == npm.parse_person(test) + assert npm.parse_person(test) == (None, None, 'http://example.com') def test_parse_person_dict4(self): test = {'name': 'Isaac Z. Schlueter', 'email': 'me@this.com', 'url': 'http://example.com'} - assert ('Isaac Z. Schlueter', 'me@this.com' , 'http://example.com') == npm.parse_person(test) + assert npm.parse_person(test) == ('Isaac Z.
Schlueter', 'me@this.com' , 'http://example.com') def test_parse_dist_with_string_values(self): test_file = self.get_test_loc('npm/dist/package.json') @@ -256,7 +256,7 @@ def test_vcs_repository_mapper(self): package = MockPackage() repo = 'git+git://bitbucket.org/vendor/my-private-repo.git' result = npm.vcs_repository_mapper(repo, package) - assert repo == result.vcs_url + assert result.vcs_url == repo def test_vcs_repository_mapper_handles_version(self): package = MockPackage() @@ -264,7 +264,7 @@ def test_vcs_repository_mapper_handles_version(self): rev = '213123aefd' expected = 'https://bitbucket.org/vendor/my-private-repo.git@213123aefd' result = npm.vcs_repository_mapper(repo, package, rev) - assert expected == result.vcs_url + assert result.vcs_url == expected def test_vcs_repository_mapper_handles_version_on_gh(self): package = MockPackage() @@ -272,7 +272,7 @@ def test_vcs_repository_mapper_handles_version_on_gh(self): rev = '213123aefd' expected = 'https://github.com/vendor/my-private-repo@213123aefd' result = npm.vcs_repository_mapper(repo, package, rev) - assert expected == result.vcs_url + assert result.vcs_url == expected def test_npm_get_package_resources(self): test_loc = self.get_test_loc('npm/get_package_resources') @@ -284,7 +284,7 @@ def test_npm_get_package_resources(self): 'get_package_resources/this-should-be-returned' ] results = [r.path for r in npm.NpmPackage.get_package_resources(root, codebase)] - assert expected == results + assert results == expected class MockPackage(object): pass diff --git a/tests/packagedcode/test_package_models.py b/tests/packagedcode/test_package_models.py index 9bd3e8615e3..c450e610091 100644 --- a/tests/packagedcode/test_package_models.py +++ b/tests/packagedcode/test_package_models.py @@ -56,7 +56,7 @@ def test_Package_creation_and_dump(self): ('repository_download_url', None), ('api_data_url', None), ] - assert expected == list(package.to_dict().items()) + assert list(package.to_dict().items()) == expected def test_Package_simple(self): package = Package( @@ -83,7 +83,7 @@ def test_Package_model_qualifiers_are_serialized_as_mappings(self): version='23', qualifiers=dict(this='that') ) - assert dict(this='that') == package.to_dict()['qualifiers'] + assert package.to_dict()['qualifiers'] == dict(this='that') def test_Package_model_qualifiers_are_kept_as_mappings(self): package = models.Package( @@ -92,7 +92,7 @@ def test_Package_model_qualifiers_are_kept_as_mappings(self): version='23', qualifiers=dict(this='that') ) - assert dict(this='that') == package.qualifiers + assert package.qualifiers == dict(this='that') def test_Package_model_qualifiers_are_converted_to_mappings(self): package = models.Package( @@ -101,7 +101,7 @@ def test_Package_model_qualifiers_are_converted_to_mappings(self): version='23', qualifiers='this=that' ) - assert dict(this='that') == package.qualifiers + assert package.qualifiers == dict(this='that') def test_Package_full(self): diff --git a/tests/packagedcode/test_phpcomposer.py b/tests/packagedcode/test_phpcomposer.py index 769186d98cc..fc3f67d4624 100644 --- a/tests/packagedcode/test_phpcomposer.py +++ b/tests/packagedcode/test_phpcomposer.py @@ -34,8 +34,7 @@ def test_parse_person(self): expected = [('Nils Adermann', 'Developer', 'naderman@naderman.de', 'http://www.naderman.de'), ('Jordi Boggiano', 'Developer', 'j.boggiano@seld.be', 'http://seld.be') ] - assert expected == list(phpcomposer.parse_person(test)) - + assert list(phpcomposer.parse_person(test)) == expected def test_parse_atimer(self): test_file = 
self.get_test_loc('phpcomposer/a-timer/composer.json') expected_loc = self.get_test_loc('phpcomposer/a-timer/composer.json.expected') diff --git a/tests/packagedcode/test_plugin.py b/tests/packagedcode/test_plugin.py index b3af5ace2cc..b34f103dad7 100644 --- a/tests/packagedcode/test_plugin.py +++ b/tests/packagedcode/test_plugin.py @@ -25,7 +25,7 @@ def test_package_list_command(self, regen=False): if regen: with open(expected_file, 'w') as ef: ef.write(result.output) - assert open(expected_file).read() == result.output + assert result.output == open(expected_file).read() @skipIf(on_windows, 'somehow this fails on Windows') def test_package_command_scan_python(self): diff --git a/tests/packagedcode/test_pypi.py b/tests/packagedcode/test_pypi.py index fde41477c01..31dd26fd7d6 100644 --- a/tests/packagedcode/test_pypi.py +++ b/tests/packagedcode/test_pypi.py @@ -26,22 +26,22 @@ class TestPyPi(PackageTester): def test_parse(self): test_file = self.get_test_loc('pypi/setup.py/setup.py') package = pypi.parse(test_file) - assert 'scancode-toolkit' == package.name - assert '1.5.0' == package.version - assert 'ScanCode' == package.parties[0].name - assert ('ScanCode is a tool to scan code for license, ' - 'copyright and other interesting facts.') == package.description - assert 'https://github.com/nexB/scancode-toolkit' == package.homepage_url + assert package.name == 'scancode-toolkit' + assert package.version == '1.5.0' + assert package.parties[0].name == 'ScanCode' + assert package.description == ('ScanCode is a tool to scan code for license, ' + 'copyright and other interesting facts.') + assert package.homepage_url == 'https://github.com/nexB/scancode-toolkit' def test_parse_metadata(self): test_folder = self.get_test_loc('pypi') test_file = os.path.join(test_folder, 'metadata.json') package = pypi.parse_metadata(test_file) - assert 'six' == package.name - assert '1.10.0' == package.version - assert 'Python 2 and 3 compatibility utilities' == package.description + assert package.name == 'six' + assert package.version == '1.10.0' + assert package.description == 'Python 2 and 3 compatibility utilities' assert 'MIT' in package.declared_license['license'] - assert ['License :: OSI Approved :: MIT License'] == package.declared_license['classifiers'] + assert package.declared_license['classifiers'] == ['License :: OSI Approved :: MIT License'] expected_classifiers = [ "Programming Language :: Python :: 2", "Programming Language :: Python :: 3", @@ -49,25 +49,25 @@ def test_parse_metadata(self): "Topic :: Software Development :: Libraries", "Topic :: Utilities" ] - assert expected_classifiers == package.keywords + assert package.keywords == expected_classifiers expected = [ dict([ ('type', u'person'), ('role', u'contact'), ('name', u'Benjamin Peterson'), ('email', None), ('url', None)]) ] - assert expected == [p.to_dict() for p in package.parties] - assert 'http://pypi.python.org/pypi/six/' == package.homepage_url + assert [p.to_dict() for p in package.parties] == expected + assert package.homepage_url == 'http://pypi.python.org/pypi/six/' def test_parse_pkg_info(self): test_file = self.get_test_loc('pypi/PKG-INFO') package = pypi.parse_pkg_info(test_file) - assert 'TicketImport' == package.name - assert '0.7a' == package.version - assert 'Import CSV and Excel files' == package.description + assert package.name == 'TicketImport' + assert package.version == '0.7a' + assert package.description == 'Import CSV and Excel files' assert 'BSD' in package.declared_license - assert 'http://nexb.com' == 
package.homepage_url + assert package.homepage_url == 'http://nexb.com' expected = [dict([('type', u'person'), ('role', u''), ('name', u'Francois Granade'), ('email', None), ('url', None)])] - assert expected == [p.to_dict() for p in package.parties] + assert [p.to_dict() for p in package.parties] == expected @skipIf(on_windows, 'Somehow this fails on Windows') def test_parse_setup_py_arpy(self): @@ -410,8 +410,8 @@ def test_pipfile_lock_sample5(self): def test_parse_with_dparse(self): test_file = self.get_test_loc('pypi/dparse/requirements.txt') dependencies = pypi.parse_with_dparse(test_file) - assert [DependentPackage(purl='pkg:pypi/lxml@3.4.4', requirement='==3.4.4', scope='dependencies', is_resolved=True), - DependentPackage(purl='pkg:pypi/requests@2.7.0', requirement='==2.7.0', scope='dependencies', is_resolved=True)] == dependencies + assert dependencies == [DependentPackage(purl='pkg:pypi/lxml@3.4.4', requirement='==3.4.4', scope='dependencies', is_resolved=True), + DependentPackage(purl='pkg:pypi/requests@2.7.0', requirement='==2.7.0', scope='dependencies', is_resolved=True)] FILENAME_LIST = [ @@ -477,6 +477,6 @@ def test_parse_setup_py_with_computed_versions(self, test_loc, expected_loc, reg ex, encoding='utf-8') try: - assert expected == results + assert results == expected except AssertionError: - assert json.dumps(expected, indent=2) == json.dumps(results, indent=2) + assert json.dumps(results, indent=2) == json.dumps(expected, indent=2) diff --git a/tests/packagedcode/test_pyrpm.py b/tests/packagedcode/test_pyrpm.py index 73557afe154..6bd5c4e7939 100644 --- a/tests/packagedcode/test_pyrpm.py +++ b/tests/packagedcode/test_pyrpm.py @@ -53,15 +53,15 @@ def test_rpm(self): 'to work and integrate best with Enlightenment.' ) - assert 'Eterm' == rpm[pyrpm.RPMTAG_NAME] == rpm.name - assert '0.9.3' == rpm[pyrpm.RPMTAG_VERSION] == rpm.version - assert '5mdv2007.0' == rpm[pyrpm.RPMTAG_RELEASE] - assert 'i586' == rpm[pyrpm.RPMTAG_ARCH] - assert 'BSD' == rpm[pyrpm.RPMTAG_COPYRIGHT] - assert description == rpm[pyrpm.RPMTAG_DESCRIPTION] + assert rpm[pyrpm.RPMTAG_NAME] == rpm.name == 'Eterm' + assert rpm[pyrpm.RPMTAG_VERSION] == rpm.version == '0.9.3' + assert rpm[pyrpm.RPMTAG_RELEASE] == '5mdv2007.0' + assert rpm[pyrpm.RPMTAG_ARCH] == 'i586' + assert rpm[pyrpm.RPMTAG_COPYRIGHT] == 'BSD' + assert rpm[pyrpm.RPMTAG_DESCRIPTION] == description assert rpm.is_binary is True - assert 'Eterm-0.9.3' == rpm.package - assert 'Eterm-0.9.3-5mdv2007.0.i586.rpm' == rpm.filename + assert rpm.package == 'Eterm-0.9.3' + assert rpm.filename == 'Eterm-0.9.3-5mdv2007.0.i586.rpm' expected = { 'arch': u'i586', @@ -83,4 +83,4 @@ def test_rpm(self): 'version': u'0.9.3' } - assert expected == rpm.to_dict() + assert rpm.to_dict() == expected diff --git a/tests/packagedcode/test_rpm.py b/tests/packagedcode/test_rpm.py index 0d3033a0df9..fea8d842362 100644 --- a/tests/packagedcode/test_rpm.py +++ b/tests/packagedcode/test_rpm.py @@ -66,7 +66,7 @@ def test_parse_to_package(self): ('repository_download_url', None), ('api_data_url', None), ] - assert expected == list(package.to_dict().items()) + assert list(package.to_dict().items()) == expected def test_pyrpm_basic(self): test_file = self.get_test_loc('rpm/header/python-glc-0.7.1-1.src.rpm') @@ -95,7 +95,7 @@ def test_pyrpm_basic(self): 'version': '0.7.1', } - assert expected == alltags + assert alltags == expected # tests that tags are all unicode assert all([isinstance(v, str) for v in alltags.values() if v]) @@ -121,9 +121,9 @@ def test_get_rpm_tags_(self): 
dist_url=None, is_binary=False, ) - assert expected == rpm.get_rpm_tags(test_file, include_desc=True) + assert rpm.get_rpm_tags(test_file, include_desc=True) == expected expected = expected._replace(description=None) - assert expected == rpm.get_rpm_tags(test_file, include_desc=False) + assert rpm.get_rpm_tags(test_file, include_desc=False) == expected def test_packagedcode_rpm_tags_and_info_on_non_rpm_file(self): test_file = self.get_test_loc('rpm/README.txt') @@ -139,7 +139,7 @@ def check_json(result, expected_file, regen=False): with io.open(expected_file, encoding='utf-8') as exp: expected = json.load(exp) - assert json.dumps(expected) == json.dumps(result) + assert json.dumps(result) == json.dumps(expected) class TestRpmTags(FileBasedTesting): diff --git a/tests/packagedcode/test_rubygems.py b/tests/packagedcode/test_rubygems.py index b812350ac29..cd09356e1e2 100644 --- a/tests/packagedcode/test_rubygems.py +++ b/tests/packagedcode/test_rubygems.py @@ -118,7 +118,7 @@ def check_rubygem(self): with io.open(expected_json_loc, encoding='utf-8') as ex: expected = json.load(ex) - assert expected == package + assert package == expected if isinstance(test_name, bytes): test_name = test_name.decode('utf-8') diff --git a/tests/packagedcode/test_utils.py b/tests/packagedcode/test_utils.py index 579d8a7ffc8..36017910488 100644 --- a/tests/packagedcode/test_utils.py +++ b/tests/packagedcode/test_utils.py @@ -19,129 +19,126 @@ def test_normalize_vcs_url_basic(self): url = 'https://pear2.php.net' result = normalize_vcs_url(url) expected = 'https://pear2.php.net' - assert expected == result + assert result == expected def test_normalize_vcs_url_svn(self): url = 'http://svn.example.org/projectA/' result = normalize_vcs_url(url) expected = 'http://svn.example.org/projectA/' - assert expected == result + assert result == expected def test_normalize_vcs_url_github(self): url = 'https://github.com/igorw/monolog' result = normalize_vcs_url(url) expected = 'https://github.com/igorw/monolog' - assert expected == result + assert result == expected def test_normalize_vcs_url_bitbucket(self): url = 'git@bitbucket.org:vendor/my-private-repo.git' result = normalize_vcs_url(url) expected = 'https://bitbucket.org/vendor/my-private-repo.git' - assert expected == result + assert result == expected def test_normalize_vcs_url_does_not_pad_git_plus(self): url = 'git+git://bitbucket.org/vendor/my-private-repo.git' result = normalize_vcs_url(url) - assert url == result + assert result == url def test_normalize_vcs_url_does_not_pad_git_plus2(self): url = 'git+https://github.com/stevepapa/angular2-autosize.git' result = normalize_vcs_url(url) expected = 'git+https://github.com/stevepapa/angular2-autosize.git' - assert expected == result + assert result == expected def test_normalize_vcs_url_0(self): test = 'npm/npm' expected = 'https://github.com/npm/npm' - assert expected == normalize_vcs_url(test) + assert normalize_vcs_url(test) == expected def test_normalize_vcs_url_1(self): test = 'gist:11081aaa281' expected = 'https://gist.github.com/11081aaa281' - assert expected == normalize_vcs_url(test) + assert normalize_vcs_url(test) == expected def test_normalize_vcs_url_2(self): test = 'bitbucket:example/repo' expected = 'https://bitbucket.org/example/repo' - assert expected == normalize_vcs_url(test) + assert normalize_vcs_url(test) == expected def test_normalize_vcs_url_3(self): test = 'gitlab:another/repo' expected = 'https://gitlab.com/another/repo' - assert expected == normalize_vcs_url(test) + assert 
normalize_vcs_url(test) == expected def test_normalize_vcs_url_4(self): test = 'expressjs/serve-static' expected = 'https://github.com/expressjs/serve-static' - assert expected == normalize_vcs_url(test) + assert normalize_vcs_url(test) == expected def test_normalize_vcs_url_5(self): test = 'git://github.com/angular/di.js.git' expected = 'git://github.com/angular/di.js.git' - assert expected == normalize_vcs_url(test) + assert normalize_vcs_url(test) == expected def test_normalize_vcs_url_6(self): test = 'git://github.com/hapijs/boom' expected = 'git://github.com/hapijs/boom' - assert expected == normalize_vcs_url(test) + assert normalize_vcs_url(test) == expected def test_normalize_vcs_url_7(self): test = 'git@github.com:balderdashy/waterline-criteria.git' expected = 'https://github.com/balderdashy/waterline-criteria.git' - assert expected == normalize_vcs_url(test) + assert normalize_vcs_url(test) == expected def test_normalize_vcs_url_8(self): test = 'http://github.com/ariya/esprima.git' expected = 'http://github.com/ariya/esprima.git' - assert expected == normalize_vcs_url(test) + assert normalize_vcs_url(test) == expected def test_normalize_vcs_url_9(self): test = 'http://github.com/isaacs/nopt' expected = 'http://github.com/isaacs/nopt' - assert expected == normalize_vcs_url(test) + assert normalize_vcs_url(test) == expected def test_normalize_vcs_url_10(self): test = 'https://github.com/chaijs/chai' expected = 'https://github.com/chaijs/chai' - assert expected == normalize_vcs_url(test) + assert normalize_vcs_url(test) == expected def test_normalize_vcs_url_11(self): test = 'https://github.com/christkv/kerberos.git' expected = 'https://github.com/christkv/kerberos.git' - assert expected == normalize_vcs_url(test) + assert normalize_vcs_url(test) == expected def test_normalize_vcs_url_12(self): test = 'https://gitlab.com/foo/private.git' expected = 'https://gitlab.com/foo/private.git' - assert expected == normalize_vcs_url(test) + assert normalize_vcs_url(test) == expected def test_normalize_vcs_url_13(self): test = 'git@gitlab.com:foo/private.git' expected = 'https://gitlab.com/foo/private.git' - assert expected == normalize_vcs_url(test) + assert normalize_vcs_url(test) == expected def test_normalize_vcs_url_git_repo_url_without_slash_slash(self): test = 'git@github.com/Filirom1/npm2aur.git' expected = 'https://github.com/Filirom1/npm2aur.git' - assert expected == normalize_vcs_url(test) + assert normalize_vcs_url(test) == expected def test_normalize_vcs_url_does_not_fail_on_empty(self): - assert None == normalize_vcs_url(None) - assert None == normalize_vcs_url('') - assert None == normalize_vcs_url(' ') + assert normalize_vcs_url(None) is None + assert normalize_vcs_url('') is None + assert normalize_vcs_url(' ') is None def test_combine_expressions_with_empty_input(self): - assert None == combine_expressions(None) - assert None == combine_expressions([]) + assert combine_expressions(None) is None + assert combine_expressions([]) is None def test_combine_expressions_with_regular(self): - assert 'mit AND apache-2.0' == combine_expressions( - ['mit', 'apache-2.0']) + assert combine_expressions(['mit', 'apache-2.0']) == 'mit AND apache-2.0' def test_combine_expressions_with_duplicated_elements(self): - assert 'mit AND apache-2.0' == combine_expressions( - ['mit', 'apache-2.0', 'mit']) + assert combine_expressions(['mit', 'apache-2.0', 'mit']) == 'mit AND apache-2.0' def test_combine_expressions_with_or_relationship(self): - assert 'mit OR apache-2.0' == combine_expressions( -
['mit', 'apache-2.0'], 'OR') + assert combine_expressions(['mit', 'apache-2.0'], 'OR') == 'mit OR apache-2.0' diff --git a/tests/packagedcode/test_win_pe.py b/tests/packagedcode/test_win_pe.py index b126352c6c6..db8365fba94 100644 --- a/tests/packagedcode/test_win_pe.py +++ b/tests/packagedcode/test_win_pe.py @@ -33,7 +33,7 @@ def check_win_pe(self, test_file, regen=False): with io.open(expected_file, encoding='utf-8') as expect: expected = json.load(expect) - assert expected == result + assert result == expected def test_win_pe_ctypes_test_pyd(self): test_file = self.get_test_loc('win_pe/_ctypes_test.pyd') diff --git a/tests/scancode/test_api.py b/tests/scancode/test_api.py index 4402736af8b..3fea677fdc2 100644 --- a/tests/scancode/test_api.py +++ b/tests/scancode/test_api.py @@ -66,7 +66,7 @@ def test_get_file_info_include_size(self): ('is_source', False), ('is_script', False) ] - assert expected == [(k, v) for k, v in info.items() if k != 'date'] + assert [(k, v) for k, v in info.items() if k != 'date'] == expected def test_get_copyrights_include_copyrights_and_authors(self): test_file = self.get_test_loc('api/copyright/iproute.c') @@ -83,7 +83,7 @@ def test_get_copyrights_include_copyrights_and_authors(self): ]), ]) - assert expected == cops + assert cops == expected def test_get_emails(self): test_file = self.get_test_loc('api/email/3w-xxxx.c') @@ -93,9 +93,9 @@ def test_get_emails(self): dict([(u'email', u'acme@conectiva.com.br'), (u'start_line', 3), (u'end_line', 3)]), dict([(u'email', u'andre@suse.com'), (u'start_line', 5), (u'end_line', 5)]) ]) - assert expected == results + assert results == expected results = api.get_emails(test_file, threshold=0) - assert expected == results + assert results == expected def test_get_emails_with_threshold(self): test_file = self.get_test_loc('api/email/3w-xxxx.c') @@ -103,7 +103,7 @@ def test_get_emails_with_threshold(self): expected = dict(emails=[ dict([(u'email', u'linux@3ware.com'), (u'start_line', 1), (u'end_line', 1)]), ]) - assert expected == results + assert results == expected def test_get_urls(self): test_file = self.get_test_loc('api/url/IMarkerActionFilter.java') @@ -113,9 +113,9 @@ def test_get_urls(self): dict([(u'url', u'https://github.com/rpm-software-management'), (u'start_line', 4), (u'end_line', 4)]), dict([(u'url', u'https://gitlab.com/Conan_Kudo'), (u'start_line', 6), (u'end_line', 6)]), ]) - assert expected == results + assert results == expected results = api.get_urls(test_file, threshold=0) - assert expected == results + assert results == expected def test_get_urls_with_threshold(self): test_file = self.get_test_loc('api/url/IMarkerActionFilter.java') @@ -123,7 +123,7 @@ def test_get_urls_with_threshold(self): dict([(u'url', u'http://www.eclipse.org/legal/epl-v10.html'), (u'start_line', 2), (u'end_line', 2)]) ]) results = api.get_urls(test_file, threshold=1) - assert expected == results + assert results == expected def test_get_license_with_expression(self): test_file = self.get_test_loc('api/license/apache-1.0.txt') @@ -132,17 +132,17 @@ def test_get_license_with_expression(self): 'apache-1.0', 'gpl-2.0 WITH linux-syscall-exception-gpl OR linux-openib' ] - assert expected == results['license_expressions'] + assert results['license_expressions'] == expected def test_get_license_with_expression2(self): test_file = self.get_test_loc('api/license/expression.RULE') results = api.get_licenses(test_file) expected = ['gpl-2.0 WITH linux-syscall-exception-gpl OR linux-openib'] - assert expected == results['license_expressions'] + 
assert results['license_expressions'] == expected def test_get_license_returns_correct_lines(self): test_file = self.get_test_loc('api/license/correct_lines2') results = api.get_licenses(test_file) - assert ['mit'] == results['license_expressions'] - assert 2 == results['licenses'][0]['start_line'] - assert 4 == results['licenses'][0]['end_line'] + assert results['license_expressions'] == ['mit'] + assert results['licenses'][0]['start_line'] == 2 + assert results['licenses'][0]['end_line'] == 4 diff --git a/tests/scancode/test_cli.py b/tests/scancode/test_cli.py index 1930ff504d5..5d6fb455f50 100644 --- a/tests/scancode/test_cli.py +++ b/tests/scancode/test_cli.py @@ -150,7 +150,7 @@ def test_scan_info_returns_full_root(): run_scan_click(args) result_data = json.loads(open(result_file).read()) file_paths = [f['path'] for f in result_data['files']] - assert 12 == len(file_paths) + assert len(file_paths) == 12 root = fileutils.as_posixpath(test_dir) assert all(p.startswith(root) for p in file_paths) @@ -166,7 +166,7 @@ def test_scan_info_returns_correct_full_root_with_single_file(): assert len(files) == 1 scanned_file = files[0] # and we check that the path is the full path without repeating the file name - assert fileutils.as_posixpath(test_file) == scanned_file['path'] + assert scanned_file['path'] == fileutils.as_posixpath(test_file) def test_scan_info_returns_does_not_strip_root_with_single_file(): @@ -356,7 +356,7 @@ def test_scan_works_with_multiple_processes_and_timeouts(): ] result_json = json.loads(open(result_file).read()) - assert sorted(sorted(x) for x in expected) == sorted(sorted(x.items()) for x in result_json['files']) + assert sorted(sorted(x.items()) for x in result_json['files']) == sorted(sorted(x) for x in expected) def check_scan_does_not_fail_when_scanning_unicode_files_and_paths(verbosity): @@ -459,7 +459,7 @@ def test_scan_quiet_to_stdout_only_echoes_json_results(): json_result1_output = load_json_result_from_string(result1_output) json_result_to_stdout = load_json_result_from_string(result_to_stdout.output) # cleanup JSON - assert json_result1_output == json_result_to_stdout + assert json_result_to_stdout == json_result1_output def test_scan_verbose_to_stdout_does_not_echo_ansi_escapes(): @@ -655,7 +655,7 @@ def test_scan_cli_help(regen=False): if regen: with io.open(expected_file, 'w', encoding='utf-8') as ef: ef.write(result.output) - assert open(expected_file).read() == result.output + assert result.output == open(expected_file).read() def test_scan_errors_out_with_unknown_option(): @@ -786,7 +786,7 @@ def test_get_displayable_summary(): u' scan_start: None', u' scan_end: None'] ) - assert expected == results + assert results == expected def test_display_summary_edge_case_scan_time_zero_should_not_fail(): @@ -846,7 +846,7 @@ def test_scan_keep_temp_files_is_false_by_default(): assert os.path.exists(temp_directory) # this does not make sense but that's what is seen in practice expected = 2 if on_windows else 1 - assert expected == len(list(os.walk(temp_directory))) + assert len(list(os.walk(temp_directory))) == expected def test_scan_keep_temp_files_keeps_files(): @@ -867,7 +867,7 @@ def test_scan_keep_temp_files_keeps_files(): assert os.path.exists(temp_directory) # this does not make sense but that's what is seen in practice expected = 8 if on_windows else 7 - assert expected == len(list(os.walk(temp_directory))) + assert len(list(os.walk(temp_directory))) == expected def test_scan_errors_out_without_an_input_path(): @@ -887,7 +887,7 @@ def 
test_merge_multiple_scans(): expected_files = json.loads(f.read())['files'] with open(result_file) as f: result_files = json.loads(f.read())['files'] - assert expected_files == result_files + assert result_files == expected_files def test_VirtualCodebase_output_with_from_json_is_same_as_original(): @@ -904,5 +904,5 @@ def test_VirtualCodebase_output_with_from_json_is_same_as_original(): expected_headers = expected.pop('headers', []) results_headers = results.pop('headers', []) - assert json.dumps(expected, indent=2) == json.dumps(results , indent=2) + assert json.dumps(results , indent=2) == json.dumps(expected, indent=2) assert len(results_headers) == len(expected_headers) + 1 diff --git a/tests/scancode/test_interrupt.py b/tests/scancode/test_interrupt.py index 28adcbf21af..5cabd012ff7 100644 --- a/tests/scancode/test_interrupt.py +++ b/tests/scancode/test_interrupt.py @@ -33,10 +33,10 @@ def some_long_function(exec_time): results = interrupt.interruptible(some_long_function, args=(0.01,), timeout=10) expected = None, 'OK' - assert expected == results + assert results == expected after = threading.active_count() - assert before == after + assert after == before def test_interruptible_stops_execution_on_timeout(self): before = threading.active_count() @@ -48,10 +48,10 @@ def some_long_function(exec_time): results = interrupt.interruptible(some_long_function, args=(20,), timeout=0.1) expected = 'ERROR: Processing interrupted: timeout after 0 seconds.', None - assert expected == results + assert results == expected after = threading.active_count() - assert before == after + assert after == before def test_interruptible_stops_execution_on_exception(self): before = threading.active_count() @@ -64,7 +64,7 @@ def some_crashing_function(): assert 'I have to crash. Now!' 
in results after = threading.active_count() - assert before == after + assert after == before def test_fake_interruptible_stops_execution_on_exception(self): def some_crashing_function(): diff --git a/tests/scancode/test_outdated.py b/tests/scancode/test_outdated.py index cd1299a6e29..8e05df0c625 100644 --- a/tests/scancode/test_outdated.py +++ b/tests/scancode/test_outdated.py @@ -39,7 +39,7 @@ def jget(*args, **kwargs): status_code=200 ) result = outdated.get_latest_version() - assert '3.0.2' == result + assert result == '3.0.2' def test_get_latest_version_fails_on_http_error(): @@ -71,7 +71,7 @@ def jget(*args, **kwargs): status_code=200 ) result = outdated.get_latest_version() - assert '2.2.1' == result + assert result == '2.2.1' def test_check_scancode_version(): diff --git a/tests/scancode/test_plugin_ignore.py b/tests/scancode/test_plugin_ignore.py index 53651bffa21..281c24e9803 100644 --- a/tests/scancode/test_plugin_ignore.py +++ b/tests/scancode/test_plugin_ignore.py @@ -52,7 +52,7 @@ def check_ProcessIgnore(self, test_dir, expected, ignore): test_plugin = ProcessIgnore() test_plugin.process_codebase(codebase, ignore=ignore) resources = [res.path for res in codebase.walk(skip_root=True)] - assert expected == sorted(resources) + assert sorted(resources) == expected def test_ProcessIgnore_with_single_file(self): test_dir = self.extract_test_tar('plugin_ignore/user.tgz') @@ -130,9 +130,9 @@ def test_scancode_ignore_vcs_files_and_dirs_by_default(self): scan_result = load_json_result(result_file) # a single test.tst file and its directory that is not a VCS file should # be listed - assert 1 == scan_result['headers'][0]['extra_data']['files_count'] + assert scan_result['headers'][0]['extra_data']['files_count'] == 1 scan_locs = [x['path'] for x in scan_result['files']] - assert [u'vcs', u'vcs/test.txt'] == scan_locs + assert scan_locs == [u'vcs', u'vcs/test.txt'] def test_scancode_ignore_vcs_files_and_dirs_by_default_no_multiprocess(self): test_dir = self.extract_test_tar('plugin_ignore/vcs.tgz') @@ -142,9 +142,9 @@ def test_scancode_ignore_vcs_files_and_dirs_by_default_no_multiprocess(self): scan_result = load_json_result(result_file) # a single test.tst file and its directory that is not a VCS file should # be listed - assert 1 == scan_result['headers'][0]['extra_data']['files_count'] + assert scan_result['headers'][0]['extra_data']['files_count'] == 1 scan_locs = [x['path'] for x in scan_result['files']] - assert [u'vcs', u'vcs/test.txt'] == scan_locs + assert scan_locs == [u'vcs', u'vcs/test.txt'] def test_scancode_ignore_single_file(self): test_dir = self.extract_test_tar('plugin_ignore/user.tgz') @@ -152,7 +152,7 @@ def test_scancode_ignore_single_file(self): args = ['--copyright', '--strip-root', '--ignore', 'sample.doc', test_dir, '--json', result_file] run_scan_click(args) scan_result = load_json_result(result_file) - assert 3 == scan_result['headers'][0]['extra_data']['files_count'] + assert scan_result['headers'][0]['extra_data']['files_count'] == 3 # FIXME: add assert 3 == scan_result['dirs_count'] scan_locs = [x['path'] for x in scan_result['files']] expected = [ @@ -163,7 +163,7 @@ def test_scancode_ignore_single_file(self): 'user/src/test', 'user/src/test/sample.txt' ] - assert expected == scan_locs + assert scan_locs == expected def test_scancode_ignore_multiple_files(self): test_dir = self.extract_test_tar('plugin_ignore/user.tgz') @@ -171,7 +171,7 @@ def test_scancode_ignore_multiple_files(self): args = ['--copyright', '--strip-root', '--ignore', 'ignore.doc', 
test_dir, '--json', result_file] run_scan_click(args) scan_result = load_json_result(result_file) - assert 2 == scan_result['headers'][0]['extra_data']['files_count'] + assert scan_result['headers'][0]['extra_data']['files_count'] == 2 scan_locs = [x['path'] for x in scan_result['files']] expected = [ u'user', @@ -179,7 +179,7 @@ def test_scancode_ignore_multiple_files(self): u'user/src/test', u'user/src/test/sample.doc', u'user/src/test/sample.txt'] - assert expected == scan_locs + assert scan_locs == expected def test_scancode_ignore_glob_files(self): test_dir = self.extract_test_tar('plugin_ignore/user.tgz') @@ -187,7 +187,7 @@ def test_scancode_ignore_glob_files(self): args = ['--copyright', '--strip-root', '--ignore', '*.doc', test_dir, '--json', result_file] run_scan_click(args) scan_result = load_json_result(result_file) - assert 1 == scan_result['headers'][0]['extra_data']['files_count'] + assert scan_result['headers'][0]['extra_data']['files_count'] == 1 scan_locs = [x['path'] for x in scan_result['files']] expected = [ u'user', @@ -195,7 +195,7 @@ def test_scancode_ignore_glob_files(self): u'user/src/test', u'user/src/test/sample.txt' ] - assert expected == scan_locs + assert scan_locs == expected def test_scancode_ignore_glob_path(self): test_dir = self.extract_test_tar('plugin_ignore/user.tgz') @@ -203,7 +203,7 @@ def test_scancode_ignore_glob_path(self): args = ['--copyright', '--strip-root', '--ignore', '*/src/test/*', test_dir, '--json', result_file] run_scan_click(args) scan_result = load_json_result(result_file) - assert 2 == scan_result['headers'][0]['extra_data']['files_count'] + assert scan_result['headers'][0]['extra_data']['files_count'] == 2 scan_locs = [x['path'] for x in scan_result['files']] expected = [ u'user', @@ -212,7 +212,7 @@ def test_scancode_ignore_glob_path(self): u'user/src/ignore.doc', u'user/src/test' ] - assert expected == scan_locs + assert scan_locs == expected def test_scancode_multiple_ignores(self): test_dir = self.extract_test_tar('plugin_ignore/user.tgz') @@ -220,9 +220,9 @@ def test_scancode_multiple_ignores(self): args = ['--copyright', '--strip-root', '--ignore', '*/src/test', '--ignore', '*.doc', test_dir, '--json', result_file] run_scan_click(args) scan_result = load_json_result(result_file) - assert 0 == scan_result['headers'][0]['extra_data']['files_count'] + assert scan_result['headers'][0]['extra_data']['files_count'] == 0 scan_locs = [x['path'] for x in scan_result['files']] - assert [u'user', u'user/src'] == scan_locs + assert scan_locs == [u'user', u'user/src'] def test_scancode_codebase_attempt_to_access_an_ignored_resourced_cached_to_disk(self): test_dir = self.extract_test_tar('plugin_ignore/user.tgz') @@ -230,7 +230,7 @@ def test_scancode_codebase_attempt_to_access_an_ignored_resourced_cached_to_disk args = ['--copyright', '--strip-root', '--ignore', 'test', test_dir, '--max-in-memory', '1', '--json', result_file] run_scan_click(args) scan_result = load_json_result(result_file) - assert 2 == scan_result['headers'][0]['extra_data']['files_count'] + assert scan_result['headers'][0]['extra_data']['files_count'] == 2 scan_locs = [x['path'] for x in scan_result['files']] expected = [ u'user', @@ -238,4 +238,4 @@ def test_scancode_codebase_attempt_to_access_an_ignored_resourced_cached_to_disk u'user/src', u'user/src/ignore.doc', ] - assert expected == scan_locs + assert scan_locs == expected diff --git a/tests/summarycode/test_generated.py b/tests/summarycode/test_generated.py index 505802c7be3..e7cc81a0957 100644 --- 
a/tests/summarycode/test_generated.py +++ b/tests/summarycode/test_generated.py @@ -26,7 +26,7 @@ def test_basic(self): ] test_file = self.get_test_loc('generated/simple/generated_1.java') result = list(generated.get_generated_code_hint(location=test_file)) - assert expected == result + assert result == expected def test_basic_alt(self): expected = [ @@ -36,19 +36,19 @@ def test_basic_alt(self): ] test_file = self.get_test_loc('generated/simple/generated_3.java') result = list(generated.get_generated_code_hint(location=test_file)) - assert expected == result + assert result == expected def test_basic2(self): expected = ['* This class was generated by the JAX-WS RI.'] test_file = self.get_test_loc('generated/simple/generated_2.java') result = list(generated.get_generated_code_hint(location=test_file)) - assert expected == result + assert result == expected def test_basic3(self): expected = ['/* This class was automatically generated'] test_file = self.get_test_loc('generated/simple/generated_4.java') result = list(generated.get_generated_code_hint(location=test_file)) - assert expected == result + assert result == expected def test_basic4(self): expected = [ @@ -57,13 +57,13 @@ def test_basic4(self): ] test_file = self.get_test_loc('generated/simple/generated_5.java') result = list(generated.get_generated_code_hint(location=test_file)) - assert expected == result + assert result == expected def test_basic5(self): expected = ['/* DO NOT EDIT THIS FILE - it is machine generated */'] test_file = self.get_test_loc('generated/simple/generated_6.c') result = list(generated.get_generated_code_hint(location=test_file)) - assert expected == result + assert result == expected def test_configure(self): expected = [ @@ -71,7 +71,7 @@ def test_configure(self): ] test_file = self.get_test_loc('generated/simple/configure') result = list(generated.get_generated_code_hint(location=test_file)) - assert expected == result + assert result == expected def test_tomcat_jspc(self): expected = [ @@ -79,7 +79,7 @@ def test_tomcat_jspc(self): ] test_file = self.get_test_loc('generated/jspc/web.xml') result = list(generated.get_generated_code_hint(location=test_file)) - assert expected == result + assert result == expected def test_generated_cli_option(self): test_dir = self.get_test_loc('generated/simple') diff --git a/tests/textcode/test_analysis.py b/tests/textcode/test_analysis.py index 26c3ae695cc..9bf7e635462 100644 --- a/tests/textcode/test_analysis.py +++ b/tests/textcode/test_analysis.py @@ -28,7 +28,7 @@ def check_text_lines(result, expected_file, regen=False): json.dump(result, tf, indent=2) with open(expected_file, 'rb') as tf: expected = json.load(tf) - assert expected == result + assert result == expected class TestAnalysis(FileBasedTesting): @@ -55,7 +55,7 @@ def test_unicode_text_lines_handles_weird_xml_encodings(self): def test_archives_do_not_yield_numbered_text_lines(self): test_file = self.get_test_loc('archive/simple.jar') result = list(numbered_text_lines(test_file)) - assert [] == result + assert result == [] def test_mpg_media_do_not_yield_numbered_text_lines(self): test_dir = self.get_test_loc('media_with_text') @@ -67,14 +67,14 @@ def test_image_media_do_not_yield_numbered_text_lines(self): test_dir = self.get_test_loc('media_without_text') for test_file in resource_iter(test_dir, with_dirs=False): result = list(numbered_text_lines(test_file)) - assert [] == result, 'Should not return text lines:' + test_file + assert result == [], 'Should not return text lines:' + test_file def 
test_numbered_text_lines_handles_sfdb(self): test_file = self.get_test_loc('analysis/splinefonts/Ambrosia.sfd') result = list(l for _, l in numbered_text_lines(test_file)) expected_file = test_file + '.expected' expected = open(expected_file, 'r').read().splitlines(True) - assert expected == list(result) + assert list(result) == expected def test_numbered_text_lines_handles_jsmap1(self): test_file = self.get_test_loc('analysis/jsmap/angular-sanitize.min.js.map') @@ -117,8 +117,8 @@ def test_numbered_text_lines_return_correct_number_of_lines(self): 'the rights to use, copy, modify, merge, , , sublicense, and/or Software, ,'), (1, u' subject') ] - assert expected == result - assert 2 == len(result) + assert result == expected + assert len(result) == 2 def test_as_unicode_converts_bytes_to_unicode(self): test_line = ' // as defined in https://tools.ietf.org/html/rfc2821#section-4.1.2.'.encode() @@ -140,10 +140,10 @@ def test_as_unicode_from_bytes_replaces_null_bytes_with_space(self): test = b'\x00is designed to give them, \x00BEFORE the\x00\x00\x00\x00\x00\x00' result = as_unicode(test) expected = ' is designed to give them, BEFORE the ' - assert expected == result + assert result == expected def test_as_unicode_from_unicode_replaces_null_bytes_with_space(self): test = '\x00is designed to give them, \x00BEFORE the\x00\x00\x00\x00\x00\x00' result = as_unicode(test) expected = ' is designed to give them, BEFORE the ' - assert expected == result + assert result == expected diff --git a/tests/textcode/test_markup.py b/tests/textcode/test_markup.py index 5a0cc55ed91..4a73c1b5f4c 100644 --- a/tests/textcode/test_markup.py +++ b/tests/textcode/test_markup.py @@ -210,5 +210,5 @@ def test_jsp_demarkup(self): u' \r\n', u' ' ] - assert expected == result + assert result == expected diff --git a/tests/textcode/test_pdf.py b/tests/textcode/test_pdf.py index 9a749341b03..bdaab4ed24a 100644 --- a/tests/textcode/test_pdf.py +++ b/tests/textcode/test_pdf.py @@ -47,7 +47,7 @@ def get_text(location): \x0c'''.splitlines(True) - assert expected == result + assert result == expected def test_pdfminer_can_parse_faulty_broadcom_doc(self): # test for https://github.com/euske/pdfminer/issues/118 @@ -75,7 +75,7 @@ def test_get_text_lines_can_parse_faulty_broadcom_doc(self): b'10/15/07\n', b'\n', b'\x0c'] - assert expected == result + assert result == expected def test_pdfminer_can_parse_apache_fop_test_pdf(self): test_file = self.get_test_loc('pdf/fop_test_pdf_1.5_test.pdf') @@ -87,12 +87,12 @@ def test_pdfminer_can_parse_apache_fop_test_pdf(self): result = pdf.get_text_lines(test_file) expected = apache_fop_expected - assert expected == result + assert result == expected def test_numbered_text_lines_does_not_fail_on_autocad_test_pdf(self): test_file = self.get_test_loc('pdf/AutoCad_Diagram.pdf') result = list(numbered_text_lines(test_file)) - assert [] == result + assert result == [] apache_fop_expected = [ diff --git a/tests/textcode/test_sfdb.py b/tests/textcode/test_sfdb.py index 8104f552e44..fa0ac3e1b75 100644 --- a/tests/textcode/test_sfdb.py +++ b/tests/textcode/test_sfdb.py @@ -20,7 +20,7 @@ def check_get_text_lines(self, test_file, expected_file): test_file = self.get_test_loc(test_file) expected_file = self.get_test_loc(expected_file) expected = open(expected_file, 'rb').read().splitlines(True) - assert expected == list(sfdb.get_text_lines(test_file)) + assert list(sfdb.get_text_lines(test_file)) == expected def test_get_text_lines_ambro(self): test_file = 'splinefonts/Ambrosia.sfd' diff --git 
a/tests/textcode/test_strings.py b/tests/textcode/test_strings.py index db0603db777..86ca93da856 100644 --- a/tests/textcode/test_strings.py +++ b/tests/textcode/test_strings.py @@ -28,7 +28,7 @@ def check_file_strings(self, test_file, expected_file, regen=False): with io.open(expected) as i: expected = json.loads(i.read()) - assert expected == results + assert results == expected return results def test_clean_string(self): @@ -68,7 +68,7 @@ def test_strings_in_file(self): test_file = self.get_test_loc('strings/basic/main.o') result = list(strings.strings_from_file(test_file)) - assert expected == result + assert result == expected def test_strings_in_file_with_min_len(self): expected = [ @@ -87,7 +87,7 @@ def test_strings_in_file_with_min_len(self): test_file = self.get_test_loc('strings/basic/main.o') result = list(strings.strings_from_file(test_file, min_len=6)) - assert expected == result + assert result == expected def test_strings_in_file_does_fail_if_contains_ERROR_string(self): test_file = self.get_test_loc('strings/bin/file_stripped') @@ -117,7 +117,7 @@ def test_file_strings_is_good(self): test_file = self.get_test_loc('strings/basic/main.o') result = list(strings.strings_from_file(test_file)) - assert expected == result + assert result == expected def test_strings_in_fonts(self): test_file = 'strings/font/DarkGardenMK.ttf'