diff --git a/docs/tuf-spec.txt b/docs/tuf-spec.txt
index 56a39773e1..55528bd0ba 100644
--- a/docs/tuf-spec.txt
+++ b/docs/tuf-spec.txt
@@ -1000,12 +1000,14 @@ Version 1.0 (Draft)
   To replace a compromised root key or any other top-level role key, the
   root role signs a new root.json file that lists the updated trusted keys
   for the role. When replacing root keys, an application will sign the new
-  file with both the new and old root keys until all clients are known to have
-  obtained the new root.json file (a safe assumption is that this will be a
-  very long time or never). There is no risk posed by continuing to sign the
-  root.json file with revoked keys as once clients have updated they no longer
-  trust the revoked key. This is only to ensure outdated clients remain able
-  to update.
+  file with both the new and old root keys. Any time such a change is
+  required, the root.json file is versioned and accessible by version number,
+  e.g. 3.root.json. Clients update the set of trusted root keys by requesting
+  the current root.json and all previous root.json versions, until one is
+  found that has been signed by keys the client already trusts. This is to
+  ensure that outdated clients remain able to update, without requiring all
+  previous root keys to be kept to sign new root.json metadata.
+
 
   To replace a delegated developer key, the role that delegated to that key
   just replaces that key with another in the signed metadata where the
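The rotation scheme the new spec text introduces can be sketched concretely. The following is illustrative only: it is not part of this patch or of the tuf.client.updater API, and fetch_root() and verify_with() are hypothetical helpers. A client holding a trusted root.json verifies each later N.root.json against the keys and threshold listed in the version it already trusts, which is why the repository never has to keep signing new root metadata with old, revoked keys:

def update_trusted_root(trusted_root, current_version, fetch_root, verify_with):
  # 'trusted_root' is the parsed root.json this client already trusts.
  # fetch_root(n) is assumed to return the parsed contents of 'n.root.json';
  # verify_with(candidate, root) is assumed to raise unless 'candidate' is
  # signed by a threshold of the root keys listed in 'root'.
  trusted_version = trusted_root['signed']['version']

  # Walk forward one version at a time so that even a long-outdated client
  # can follow the chain of root key rotations up to the current root.json.
  for version in range(trusted_version + 1, current_version + 1):
    candidate = fetch_root(version)
    verify_with(candidate, trusted_root)
    trusted_root = candidate

  return trusted_root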
-""" - -# Help with Python 3 compatibility, where the print statement is a function, an -# implicit relative import is invalid, and the '/' operator performs true -# division. Example: print 'hello world' raises a 'SyntaxError' exception. -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division -from __future__ import unicode_literals - -import sys -import optparse -import logging - -import tuf -import tuf.formats -import tuf.client.updater -import tuf.log - -# See 'log.py' to learn how logging is handled in TUF. -logger = logging.getLogger('tuf.basic_client') - - -def update_client(repository_mirror): - """ - - Perform an update of the metadata and target files located at - 'repository_mirror'. Target files are saved to the 'targets' directory - in the current working directory. The current directory must already - include a 'metadata' directory, which in turn must contain the 'current' - and 'previous' directories. At a minimum, these two directories require - the 'root.json' metadata file. - - - repository_mirror: - The URL to the repository mirror hosting the metadata and target - files. E.g., 'http://localhost:8001' - - - tuf.RepositoryError, if 'repository_mirror' is improperly formatted. - - - Connects to a repository mirror and updates the metadata files and - any target files. Obsolete targets are also removed locally. - - - None. - """ - - # Does 'repository_mirror' have the correct format? - try: - tuf.formats.URL_SCHEMA.check_match(repository_mirror) - except tuf.FormatError as e: - message = 'The repository mirror supplied is invalid.' - raise tuf.RepositoryError(message) - - # Set the local repository directory containing all of the metadata files. - tuf.conf.repository_directory = '.' - - # Set the repository mirrors. This dictionary is needed by the Updater - # class of updater.py. - repository_mirrors = {'mirror': {'url_prefix': repository_mirror, - 'metadata_path': 'metadata', - 'targets_path': 'targets', - 'confined_target_dirs': ['']}} - - # Create the repository object using the repository name 'repository' - # and the repository mirrors defined above. - updater = tuf.client.updater.Updater('repository', repository_mirrors) - - # The local destination directory to save the target files. - destination_directory = './targets' - - # Refresh the repository's top-level roles, store the target information for - # all the targets tracked, and determine which of these targets have been - # updated. - updater.refresh() - """ - all_targets = updater.all_targets() - updated_targets = updater.updated_targets(all_targets, destination_directory) - - # Download each of these updated targets and save them locally. - for target in updated_targets: - try: - updater.download_target(target, destination_directory) - except tuf.DownloadError as e: - pass - - # Remove any files from the destination directory that are no longer being - # tracked. - updater.remove_obsolete_targets(destination_directory) - """ - - - - -def parse_options(): - """ - - Parse the command-line options and set the logging level - as specified by the user through the --verbose option. - 'basic_client' expects the '--repo' to be set by the user. - - Example: - $ python basic_client.py --repo http://localhost:8001 - - If the required option is unset, a parser error is printed - and the scripts exits. - - - None. - - - None. - - - Sets the logging level for TUF logging. - - - The 'options.REPOSITORY_MIRROR' string. 
- """ - - parser = optparse.OptionParser() - - # Add the options supported by 'basic_client' to the option parser. - parser.add_option('--verbose', dest='VERBOSE', type=int, default=2, - help='Set the verbosity level of logging messages.' - 'The lower the setting, the greater the verbosity.') - - parser.add_option('--repo', dest='REPOSITORY_MIRROR', type='string', - help='Specifiy the repository mirror\'s URL prefix ' - '(e.g., http://www.example.com:8001/tuf/).' - ' The client will download updates from this mirror.') - - options, args = parser.parse_args() - - # Set the logging level. - if options.VERBOSE == 5: - tuf.log.set_log_level(logging.CRITICAL) - elif options.VERBOSE == 4: - tuf.log.set_log_level(logging.ERROR) - elif options.VERBOSE == 3: - tuf.log.set_log_level(logging.WARNING) - elif options.VERBOSE == 2: - tuf.log.set_log_level(logging.INFO) - elif options.VERBOSE == 1: - tuf.log.set_log_level(logging.DEBUG) - else: - tuf.log.set_log_level(logging.NOTSET) - - # Ensure the '--repo' option was set by the user. - if options.REPOSITORY_MIRROR is None: - message = '"--repo" must be set on the command-line.' - parser.error(message) - - # Return the repository mirror containing the metadata and target files. - return options.REPOSITORY_MIRROR - - - -if __name__ == '__main__': - - # Parse the options and set the logging level. - repository_mirror = parse_options() - - # Perform an update of all the files in the 'targets' directory located in - # the current directory. - try: - update_client(repository_mirror) - - except (tuf.NoWorkingMirrorError, tuf.RepositoryError) as e: - sys.stderr.write('Error: '+str(e)+'\n') - sys.exit(1) - - # Successfully updated the client's target files. - sys.exit(0) diff --git a/tests/repository_data/client/metadata/current/empty_file.json b/tests/repository_data/client/metadata/current/empty_file.json deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/repository_data/client/metadata/current/role1.json b/tests/repository_data/client/metadata/current/role1.json index 37b378d3e2..96d921b5eb 100644 --- a/tests/repository_data/client/metadata/current/role1.json +++ b/tests/repository_data/client/metadata/current/role1.json @@ -3,14 +3,35 @@ { "keyid": "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9", "method": "ed25519", - "sig": "98c7634186f7a02b3a56d8204e62b3a1d25225935dc47c720426ef591d09931e071f96f8d47ef3ec814dd7278f05c01190e60386ad03e546869c7aeeb3249703" + "sig": "e8f6db97fcad5eb2ca1cf5fc6b6d4579d026811581b0d2061af90c7cb26d966e15a06e7c596f663b05aa061308929f96136167359fc9d44919a36383403abd09" } ], "signed": { "_type": "Targets", "delegations": { - "keys": {}, - "roles": [] + "keys": { + "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ed25519", + "keyval": { + "public": "3b11296fe2dba14a2ef204e542e9e4195293bcf3042655e3d7e4ef5afe3cf36a" + } + } + }, + "roles": [ + { + "keyids": [ + "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9" + ], + "name": "role2", + "paths": [], + "terminating": false, + "threshold": 1 + } + ] }, "expires": "2030-01-01T00:00:00Z", "targets": { diff --git a/tests/repository_data/client/metadata/current/role1.json.gz b/tests/repository_data/client/metadata/current/role1.json.gz index 53d950b9bd..e2fd36f00b 100644 Binary files a/tests/repository_data/client/metadata/current/role1.json.gz and b/tests/repository_data/client/metadata/current/role1.json.gz differ diff --git 
a/tests/repository_data/client/metadata/current/role2.json b/tests/repository_data/client/metadata/current/role2.json new file mode 100644 index 0000000000..20b1206a70 --- /dev/null +++ b/tests/repository_data/client/metadata/current/role2.json @@ -0,0 +1,19 @@ +{ + "signatures": [ + { + "keyid": "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9", + "method": "ed25519", + "sig": "8fdca8154157e983d86efb16917ad973941dfa75a47d99a88b393d0955f1508aff55b66d0592ff2ad2f431d6826d6544009a921b5aae503f3f795b09ed549f0a" + } + ], + "signed": { + "_type": "Targets", + "delegations": { + "keys": {}, + "roles": [] + }, + "expires": "2030-01-01T00:00:00Z", + "targets": {}, + "version": 1 + } +} \ No newline at end of file diff --git a/tests/repository_data/client/metadata/current/role2.json.gz b/tests/repository_data/client/metadata/current/role2.json.gz new file mode 100644 index 0000000000..6074f9bdfd Binary files /dev/null and b/tests/repository_data/client/metadata/current/role2.json.gz differ diff --git a/tests/repository_data/client/metadata/current/root.json b/tests/repository_data/client/metadata/current/root.json index 94327a115a..ab8e8dc859 100644 --- a/tests/repository_data/client/metadata/current/root.json +++ b/tests/repository_data/client/metadata/current/root.json @@ -3,7 +3,7 @@ { "keyid": "5602f4df0cd26b2112f0833b1ce8d5fcbb595754961d3a04f37b9815e2ced503", "method": "RSASSA-PSS", - "sig": "3851d11ed11ea69ab5d873cfd015de79dc856d83e0a060e73d535d705da086c26191e6bc1ed6bbdde9305c3816c1c5885b48cf51c41fedc906a5ebe0e33a6b823145d40bd3e588e77c6bc724b62f4b2ca9700da03e0ba603170bfd365ea1d25ee7f9661848a14f5916869f00f3e03aa4cb468a4de647bbf205b96f9aa8dd408e3e0b1f9d53fe74654dfe139441dfe3651b3473b67bd104d754112e594a9c6ed0127e94b9057322d630f70c93c01d0cd0c2b98f6abdfd2ed7ac7dc5d3e201d191e168992574edfa935bb2a2cbaa67532c7aaddd4582b53a015c11e567d7fe7ba38cc743e7a939b9e7f2e334b48f46bdf4b82b66e639189644998d90a27847e63e8ade170f8c8aa15c8076b0af8032d78870ac18278663eddb08a7eed30c199c97c81d30bdf47d6649c7ab297120b983d9b6a1da648026d552be73bb77a9346f98a3b8db1a583b71bb706c397a3142f8194c80e62a1632152cd2ffd340605325ea39baf60fb30cf574701e5ae07efee75fc51df4f1810f3ce14345c466d25e36a3" + "sig": "3a5a1d2c4fba47117b1d297517261da9f13d6bfbdfd322bb68d5631ff456dba6446e35cd0d67cf068d1592cd80333f5566b74225dfbdc2aead60ea4ca2f79a4d4542e9d6039d9715404f07ca05145b02a53241ec30a992161777cc9154e9a8fc37cd292f6dd11af4acfc307b8b4ccb3024ad3d5409d24b91b6ae9f542d8813641f0d8d4c5a16d30f471937c2badcffc591f0e32f81755b44e8139d69042213997d459711c482a7bde2e0177ba0079a3d7cf19f825f0619c114dc88ff9eada298b7e524c727a51fbe5b9e59221f0a515931427aa662022738a03c3b1f44953e6a110e0aacbd55c328f9f0bbe97d3f6bde5fe0b3d390b5da3442ea02cc06b7b5daef31f2356283ab197d11f677c57106897b27ca2c2ada87880d906416d9de90ac1593312af726f4a43f9290b19a81d4d092be6408deb53469dabd27f1d4a16d5f306736d483a5f9b4ab820d4e8ffca2f05ba0e501062da11389da137a0aff7c8111e28269a609fe602eb1786d1732f43d6cddbe6c5847241697a7ed5f395879b4" } ], "signed": { diff --git a/tests/repository_data/client/metadata/current/root.json.gz b/tests/repository_data/client/metadata/current/root.json.gz index 4a13f7efd6..b4c12d146d 100644 Binary files a/tests/repository_data/client/metadata/current/root.json.gz and b/tests/repository_data/client/metadata/current/root.json.gz differ diff --git a/tests/repository_data/client/metadata/current/snapshot.json b/tests/repository_data/client/metadata/current/snapshot.json index d1d22bd88a..bf565452c9 100644 --- a/tests/repository_data/client/metadata/current/snapshot.json +++ 
b/tests/repository_data/client/metadata/current/snapshot.json @@ -3,7 +3,7 @@ { "keyid": "182216b8800c50ddf000043b31ddf90d815c754ab4e0b31a5952a839b371bed9", "method": "ed25519", - "sig": "1f81170dfee2f170f6814cb2f909f0ad0a283eb3f8b7924f41d14ea81efab2c43491aaedce30338fce483fcad32ba0bde729e9b6b44888c99401ee04a5e43302" + "sig": "9419a135b0c41fe350d712f944047661ddfa2c8b4cb141088976bc789c8ea55aba6efff78dcfa46b11790136281ae649e1e421713fbab47e274e1afd838aca03" } ], "signed": { @@ -13,9 +13,12 @@ "role1.json": { "version": 1 }, + "role2.json": { + "version": 1 + }, "root.json": { "hashes": { - "sha256": "9d0ed7fce4914cd97997c03def8c94b0aeb10aebc383e0d747a8e8257a84c8ff" + "sha256": "03843cc3b2a50d363894b2aa26e617466147355487d647abd36aba209e69a6e6" }, "length": 3329, "version": 1 diff --git a/tests/repository_data/client/metadata/current/snapshot.json.gz b/tests/repository_data/client/metadata/current/snapshot.json.gz index 51d94d42f1..88aa0f12c9 100644 Binary files a/tests/repository_data/client/metadata/current/snapshot.json.gz and b/tests/repository_data/client/metadata/current/snapshot.json.gz differ diff --git a/tests/repository_data/client/metadata/current/targets.json b/tests/repository_data/client/metadata/current/targets.json index 8c9337d88c..0620bfe42a 100644 --- a/tests/repository_data/client/metadata/current/targets.json +++ b/tests/repository_data/client/metadata/current/targets.json @@ -3,7 +3,7 @@ { "keyid": "a0a0f0cf08daff7afd1eb6582756d43987aa73f028044836a5519259706ca19b", "method": "ed25519", - "sig": "e96f9ca4425a37919dc91d5679c5319150b41f729389d70be7d8c8dc3dda647aa9fd11ca3c6a959c10819d652e516b375caf147721f96af329b54c0720373c06" + "sig": "74ee9970ed709ab65586ef99c0005102676a92f11e2a448bb685875b641d2efe3fd2bdefaa90e1a050bfbb34163834aadb43d13ac0c7452aa7df27c454c34507" } ], "signed": { @@ -23,7 +23,6 @@ }, "roles": [ { - "backtrack": true, "keyids": [ "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9" ], @@ -31,6 +30,7 @@ "paths": [ "/file3.txt" ], + "terminating": false, "threshold": 1 } ] diff --git a/tests/repository_data/client/metadata/current/targets.json.gz b/tests/repository_data/client/metadata/current/targets.json.gz index 44e222812c..0753ea6945 100644 Binary files a/tests/repository_data/client/metadata/current/targets.json.gz and b/tests/repository_data/client/metadata/current/targets.json.gz differ diff --git a/tests/repository_data/client/metadata/current/timestamp.json b/tests/repository_data/client/metadata/current/timestamp.json index e886fb3e80..3f44ae9773 100644 --- a/tests/repository_data/client/metadata/current/timestamp.json +++ b/tests/repository_data/client/metadata/current/timestamp.json @@ -3,7 +3,7 @@ { "keyid": "3f09f6468a522bea0364a23315872d8400875dfdb24ff4ecd32f27164d5c23c1", "method": "ed25519", - "sig": "1749729587ae99bdbaeff59562f1a06cfa3f2b8c31d0b82b8f10fcb4cc5fbf21a0df885e57994bae1b542b814cafaf506d357618515c18d8b5e4b23b90e26506" + "sig": "9a43adeef13b9d1d15f2b773e7d62b667761a65b34f2bd04c2565d842c113a85307131cfd6ae9f83d91963503783c6d98692887c322c49a6ae7b1d0869ce2105" } ], "signed": { @@ -12,9 +12,9 @@ "meta": { "snapshot.json": { "hashes": { - "sha256": "b55cf3ef997ca2d2be2ec921cdc8ab278ee77748041f9426dbe4ce1b2c8ba781" + "sha256": "c15b14217a3ad50ae2c136109983b7269d3bda42c4e1e530e30e7b5854b53a11" }, - "length": 636, + "length": 678, "version": 1 } }, diff --git a/tests/repository_data/client/metadata/current/timestamp.json.gz b/tests/repository_data/client/metadata/current/timestamp.json.gz index c855737bd9..f59476941f 100644 Binary 
files a/tests/repository_data/client/metadata/current/timestamp.json.gz and b/tests/repository_data/client/metadata/current/timestamp.json.gz differ diff --git a/tests/repository_data/client/metadata/previous/role1.json b/tests/repository_data/client/metadata/previous/role1.json index 37b378d3e2..96d921b5eb 100644 --- a/tests/repository_data/client/metadata/previous/role1.json +++ b/tests/repository_data/client/metadata/previous/role1.json @@ -3,14 +3,35 @@ { "keyid": "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9", "method": "ed25519", - "sig": "98c7634186f7a02b3a56d8204e62b3a1d25225935dc47c720426ef591d09931e071f96f8d47ef3ec814dd7278f05c01190e60386ad03e546869c7aeeb3249703" + "sig": "e8f6db97fcad5eb2ca1cf5fc6b6d4579d026811581b0d2061af90c7cb26d966e15a06e7c596f663b05aa061308929f96136167359fc9d44919a36383403abd09" } ], "signed": { "_type": "Targets", "delegations": { - "keys": {}, - "roles": [] + "keys": { + "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ed25519", + "keyval": { + "public": "3b11296fe2dba14a2ef204e542e9e4195293bcf3042655e3d7e4ef5afe3cf36a" + } + } + }, + "roles": [ + { + "keyids": [ + "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9" + ], + "name": "role2", + "paths": [], + "terminating": false, + "threshold": 1 + } + ] }, "expires": "2030-01-01T00:00:00Z", "targets": { diff --git a/tests/repository_data/client/metadata/previous/role1.json.gz b/tests/repository_data/client/metadata/previous/role1.json.gz index 53d950b9bd..e2fd36f00b 100644 Binary files a/tests/repository_data/client/metadata/previous/role1.json.gz and b/tests/repository_data/client/metadata/previous/role1.json.gz differ diff --git a/tests/repository_data/client/metadata/previous/role2.json b/tests/repository_data/client/metadata/previous/role2.json new file mode 100644 index 0000000000..20b1206a70 --- /dev/null +++ b/tests/repository_data/client/metadata/previous/role2.json @@ -0,0 +1,19 @@ +{ + "signatures": [ + { + "keyid": "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9", + "method": "ed25519", + "sig": "8fdca8154157e983d86efb16917ad973941dfa75a47d99a88b393d0955f1508aff55b66d0592ff2ad2f431d6826d6544009a921b5aae503f3f795b09ed549f0a" + } + ], + "signed": { + "_type": "Targets", + "delegations": { + "keys": {}, + "roles": [] + }, + "expires": "2030-01-01T00:00:00Z", + "targets": {}, + "version": 1 + } +} \ No newline at end of file diff --git a/tests/repository_data/client/metadata/previous/role2.json.gz b/tests/repository_data/client/metadata/previous/role2.json.gz new file mode 100644 index 0000000000..6074f9bdfd Binary files /dev/null and b/tests/repository_data/client/metadata/previous/role2.json.gz differ diff --git a/tests/repository_data/client/metadata/previous/root.json b/tests/repository_data/client/metadata/previous/root.json index 94327a115a..ab8e8dc859 100644 --- a/tests/repository_data/client/metadata/previous/root.json +++ b/tests/repository_data/client/metadata/previous/root.json @@ -3,7 +3,7 @@ { "keyid": "5602f4df0cd26b2112f0833b1ce8d5fcbb595754961d3a04f37b9815e2ced503", "method": "RSASSA-PSS", - "sig": 
"3851d11ed11ea69ab5d873cfd015de79dc856d83e0a060e73d535d705da086c26191e6bc1ed6bbdde9305c3816c1c5885b48cf51c41fedc906a5ebe0e33a6b823145d40bd3e588e77c6bc724b62f4b2ca9700da03e0ba603170bfd365ea1d25ee7f9661848a14f5916869f00f3e03aa4cb468a4de647bbf205b96f9aa8dd408e3e0b1f9d53fe74654dfe139441dfe3651b3473b67bd104d754112e594a9c6ed0127e94b9057322d630f70c93c01d0cd0c2b98f6abdfd2ed7ac7dc5d3e201d191e168992574edfa935bb2a2cbaa67532c7aaddd4582b53a015c11e567d7fe7ba38cc743e7a939b9e7f2e334b48f46bdf4b82b66e639189644998d90a27847e63e8ade170f8c8aa15c8076b0af8032d78870ac18278663eddb08a7eed30c199c97c81d30bdf47d6649c7ab297120b983d9b6a1da648026d552be73bb77a9346f98a3b8db1a583b71bb706c397a3142f8194c80e62a1632152cd2ffd340605325ea39baf60fb30cf574701e5ae07efee75fc51df4f1810f3ce14345c466d25e36a3" + "sig": "3a5a1d2c4fba47117b1d297517261da9f13d6bfbdfd322bb68d5631ff456dba6446e35cd0d67cf068d1592cd80333f5566b74225dfbdc2aead60ea4ca2f79a4d4542e9d6039d9715404f07ca05145b02a53241ec30a992161777cc9154e9a8fc37cd292f6dd11af4acfc307b8b4ccb3024ad3d5409d24b91b6ae9f542d8813641f0d8d4c5a16d30f471937c2badcffc591f0e32f81755b44e8139d69042213997d459711c482a7bde2e0177ba0079a3d7cf19f825f0619c114dc88ff9eada298b7e524c727a51fbe5b9e59221f0a515931427aa662022738a03c3b1f44953e6a110e0aacbd55c328f9f0bbe97d3f6bde5fe0b3d390b5da3442ea02cc06b7b5daef31f2356283ab197d11f677c57106897b27ca2c2ada87880d906416d9de90ac1593312af726f4a43f9290b19a81d4d092be6408deb53469dabd27f1d4a16d5f306736d483a5f9b4ab820d4e8ffca2f05ba0e501062da11389da137a0aff7c8111e28269a609fe602eb1786d1732f43d6cddbe6c5847241697a7ed5f395879b4" } ], "signed": { diff --git a/tests/repository_data/client/metadata/previous/root.json.gz b/tests/repository_data/client/metadata/previous/root.json.gz index 4a13f7efd6..b4c12d146d 100644 Binary files a/tests/repository_data/client/metadata/previous/root.json.gz and b/tests/repository_data/client/metadata/previous/root.json.gz differ diff --git a/tests/repository_data/client/metadata/previous/snapshot.json b/tests/repository_data/client/metadata/previous/snapshot.json index d1d22bd88a..bf565452c9 100644 --- a/tests/repository_data/client/metadata/previous/snapshot.json +++ b/tests/repository_data/client/metadata/previous/snapshot.json @@ -3,7 +3,7 @@ { "keyid": "182216b8800c50ddf000043b31ddf90d815c754ab4e0b31a5952a839b371bed9", "method": "ed25519", - "sig": "1f81170dfee2f170f6814cb2f909f0ad0a283eb3f8b7924f41d14ea81efab2c43491aaedce30338fce483fcad32ba0bde729e9b6b44888c99401ee04a5e43302" + "sig": "9419a135b0c41fe350d712f944047661ddfa2c8b4cb141088976bc789c8ea55aba6efff78dcfa46b11790136281ae649e1e421713fbab47e274e1afd838aca03" } ], "signed": { @@ -13,9 +13,12 @@ "role1.json": { "version": 1 }, + "role2.json": { + "version": 1 + }, "root.json": { "hashes": { - "sha256": "9d0ed7fce4914cd97997c03def8c94b0aeb10aebc383e0d747a8e8257a84c8ff" + "sha256": "03843cc3b2a50d363894b2aa26e617466147355487d647abd36aba209e69a6e6" }, "length": 3329, "version": 1 diff --git a/tests/repository_data/client/metadata/previous/snapshot.json.gz b/tests/repository_data/client/metadata/previous/snapshot.json.gz index 51d94d42f1..88aa0f12c9 100644 Binary files a/tests/repository_data/client/metadata/previous/snapshot.json.gz and b/tests/repository_data/client/metadata/previous/snapshot.json.gz differ diff --git a/tests/repository_data/client/metadata/previous/targets.json b/tests/repository_data/client/metadata/previous/targets.json index 8c9337d88c..0620bfe42a 100644 --- a/tests/repository_data/client/metadata/previous/targets.json +++ b/tests/repository_data/client/metadata/previous/targets.json @@ 
-3,7 +3,7 @@ { "keyid": "a0a0f0cf08daff7afd1eb6582756d43987aa73f028044836a5519259706ca19b", "method": "ed25519", - "sig": "e96f9ca4425a37919dc91d5679c5319150b41f729389d70be7d8c8dc3dda647aa9fd11ca3c6a959c10819d652e516b375caf147721f96af329b54c0720373c06" + "sig": "74ee9970ed709ab65586ef99c0005102676a92f11e2a448bb685875b641d2efe3fd2bdefaa90e1a050bfbb34163834aadb43d13ac0c7452aa7df27c454c34507" } ], "signed": { @@ -23,7 +23,6 @@ }, "roles": [ { - "backtrack": true, "keyids": [ "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9" ], @@ -31,6 +30,7 @@ "paths": [ "/file3.txt" ], + "terminating": false, "threshold": 1 } ] diff --git a/tests/repository_data/client/metadata/previous/targets.json.gz b/tests/repository_data/client/metadata/previous/targets.json.gz index 44e222812c..0753ea6945 100644 Binary files a/tests/repository_data/client/metadata/previous/targets.json.gz and b/tests/repository_data/client/metadata/previous/targets.json.gz differ diff --git a/tests/repository_data/client/metadata/previous/timestamp.json b/tests/repository_data/client/metadata/previous/timestamp.json index e886fb3e80..3f44ae9773 100644 --- a/tests/repository_data/client/metadata/previous/timestamp.json +++ b/tests/repository_data/client/metadata/previous/timestamp.json @@ -3,7 +3,7 @@ { "keyid": "3f09f6468a522bea0364a23315872d8400875dfdb24ff4ecd32f27164d5c23c1", "method": "ed25519", - "sig": "1749729587ae99bdbaeff59562f1a06cfa3f2b8c31d0b82b8f10fcb4cc5fbf21a0df885e57994bae1b542b814cafaf506d357618515c18d8b5e4b23b90e26506" + "sig": "9a43adeef13b9d1d15f2b773e7d62b667761a65b34f2bd04c2565d842c113a85307131cfd6ae9f83d91963503783c6d98692887c322c49a6ae7b1d0869ce2105" } ], "signed": { @@ -12,9 +12,9 @@ "meta": { "snapshot.json": { "hashes": { - "sha256": "b55cf3ef997ca2d2be2ec921cdc8ab278ee77748041f9426dbe4ce1b2c8ba781" + "sha256": "c15b14217a3ad50ae2c136109983b7269d3bda42c4e1e530e30e7b5854b53a11" }, - "length": 636, + "length": 678, "version": 1 } }, diff --git a/tests/repository_data/client/metadata/previous/timestamp.json.gz b/tests/repository_data/client/metadata/previous/timestamp.json.gz index c855737bd9..f59476941f 100644 Binary files a/tests/repository_data/client/metadata/previous/timestamp.json.gz and b/tests/repository_data/client/metadata/previous/timestamp.json.gz differ diff --git a/tests/repository_data/generate.py b/tests/repository_data/generate.py index 32a96ea64c..3ea31f9825 100755 --- a/tests/repository_data/generate.py +++ b/tests/repository_data/generate.py @@ -122,6 +122,9 @@ repository.targets.delegate('role1', [delegation_public], [target3_filepath]) repository.targets('role1').load_signing_key(delegation_private) +repository.targets('role1').delegate('role2', [delegation_public], []) +repository.targets('role2').load_signing_key(delegation_private) + # Set the top-level expiration times far into the future so that # they do not expire anytime soon, or else the tests fail. Unit tests may # modify the expiration datetimes (of the copied files), if they wish. @@ -130,6 +133,7 @@ repository.snapshot.expiration = datetime.datetime(2030, 1, 1, 0, 0) repository.timestamp.expiration = datetime.datetime(2030, 1, 1, 0, 0) repository.targets('role1').expiration = datetime.datetime(2030, 1, 1, 0, 0) +repository.targets('role2').expiration = datetime.datetime(2030, 1, 1, 0, 0) # Compress the top-level role metadata so that the unit tests have a # pre-generated example of compressed metadata. @@ -140,7 +144,7 @@ # Create the actual metadata files, which are saved to 'metadata.staged'. 
if not options.dry_run: - repository.write() + repository.writeall() # Move the staged.metadata to 'metadata' and create the client folder. The # client folder, which includes the required directory structure and metadata diff --git a/tests/repository_data/repository/metadata.staged/1.root.json b/tests/repository_data/repository/metadata.staged/1.root.json new file mode 100644 index 0000000000..94327a115a --- /dev/null +++ b/tests/repository_data/repository/metadata.staged/1.root.json @@ -0,0 +1,86 @@ +{ + "signatures": [ + { + "keyid": "5602f4df0cd26b2112f0833b1ce8d5fcbb595754961d3a04f37b9815e2ced503", + "method": "RSASSA-PSS", + "sig": "3851d11ed11ea69ab5d873cfd015de79dc856d83e0a060e73d535d705da086c26191e6bc1ed6bbdde9305c3816c1c5885b48cf51c41fedc906a5ebe0e33a6b823145d40bd3e588e77c6bc724b62f4b2ca9700da03e0ba603170bfd365ea1d25ee7f9661848a14f5916869f00f3e03aa4cb468a4de647bbf205b96f9aa8dd408e3e0b1f9d53fe74654dfe139441dfe3651b3473b67bd104d754112e594a9c6ed0127e94b9057322d630f70c93c01d0cd0c2b98f6abdfd2ed7ac7dc5d3e201d191e168992574edfa935bb2a2cbaa67532c7aaddd4582b53a015c11e567d7fe7ba38cc743e7a939b9e7f2e334b48f46bdf4b82b66e639189644998d90a27847e63e8ade170f8c8aa15c8076b0af8032d78870ac18278663eddb08a7eed30c199c97c81d30bdf47d6649c7ab297120b983d9b6a1da648026d552be73bb77a9346f98a3b8db1a583b71bb706c397a3142f8194c80e62a1632152cd2ffd340605325ea39baf60fb30cf574701e5ae07efee75fc51df4f1810f3ce14345c466d25e36a3" + } + ], + "signed": { + "_type": "Root", + "compression_algorithms": [ + "gz" + ], + "consistent_snapshot": false, + "expires": "2030-01-01T00:00:00Z", + "keys": { + "182216b8800c50ddf000043b31ddf90d815c754ab4e0b31a5952a839b371bed9": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ed25519", + "keyval": { + "public": "e8bd29d20025d3ac755a27b8d8efe185e368244000d9d3b76e32afb968cb0ea8" + } + }, + "3f09f6468a522bea0364a23315872d8400875dfdb24ff4ecd32f27164d5c23c1": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ed25519", + "keyval": { + "public": "52de9284608be6b718a9d88c7c5d8d9d93b33732e00d670dd4ebe4bce8bbc83c" + } + }, + "5602f4df0cd26b2112f0833b1ce8d5fcbb595754961d3a04f37b9815e2ced503": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "rsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAsDqUoiFJZX+5gm5pyI1l\nVc/N3yjJVOIl9GyiK0mRyzV3IzUQzhjq8nhk0eLfzXw2XwIAYOJC6dR/tGRG4JDx\nJkez5FFH4zLosr/XzT7CG5zxJ3kKICLD1v9rZQr5ZgARQDOpkxzPz46rGnE0sHd7\nMpnpPMScA1pMIzwM1RoPS4ntZipI1cl9M7HMQ6mkBp8/DNKCqaDWixJqaGgWrhhK\nhI/1mzBliMKriNxPKSCGVlOk/QpZft+y1fs42s0DMd5BOFBo+ZcoXLYRncg9S3A2\nxx/jT69Bt3ceiAZqnp7f6M+ZzoUifSelaoL7QIYg/GkEl+0oxTD0yRphGiCKwn9c\npSbn7NgnbjqSgIMeEtlf/5Coyrs26pyFf/9GbusddPSxxxwIJ/7IJuF7P1Yy0WpZ\nkMeY83h9n2IdnEYi+rpdbLJPQd7Fpu2xrdA3Fokj8AvCpcmxn8NIXZuK++r8/xsE\nAUL30HH7dgVn50AvdPaJnqAORT3OlabW0DK9prcwKnyzAgMBAAE=\n-----END PUBLIC KEY-----" + } + }, + "a0a0f0cf08daff7afd1eb6582756d43987aa73f028044836a5519259706ca19b": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ed25519", + "keyval": { + "public": "0692a846935833d685168ae8c98fee951d52d8aa76685b8ba55b8e1eada217c2" + } + } + }, + "roles": { + "root": { + "keyids": [ + "5602f4df0cd26b2112f0833b1ce8d5fcbb595754961d3a04f37b9815e2ced503" + ], + "threshold": 1 + }, + "snapshot": { + "keyids": [ + "182216b8800c50ddf000043b31ddf90d815c754ab4e0b31a5952a839b371bed9" + ], + "threshold": 1 + }, + "targets": { + "keyids": [ + "a0a0f0cf08daff7afd1eb6582756d43987aa73f028044836a5519259706ca19b" + ], + "threshold": 1 + 
}, + "timestamp": { + "keyids": [ + "3f09f6468a522bea0364a23315872d8400875dfdb24ff4ecd32f27164d5c23c1" + ], + "threshold": 1 + } + }, + "version": 1 + } +} \ No newline at end of file diff --git a/tests/repository_data/repository/metadata.staged/1.root.json.gz b/tests/repository_data/repository/metadata.staged/1.root.json.gz new file mode 100644 index 0000000000..4a13f7efd6 Binary files /dev/null and b/tests/repository_data/repository/metadata.staged/1.root.json.gz differ diff --git a/tests/repository_data/repository/metadata.staged/role1.json b/tests/repository_data/repository/metadata.staged/role1.json index 37b378d3e2..96d921b5eb 100644 --- a/tests/repository_data/repository/metadata.staged/role1.json +++ b/tests/repository_data/repository/metadata.staged/role1.json @@ -3,14 +3,35 @@ { "keyid": "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9", "method": "ed25519", - "sig": "98c7634186f7a02b3a56d8204e62b3a1d25225935dc47c720426ef591d09931e071f96f8d47ef3ec814dd7278f05c01190e60386ad03e546869c7aeeb3249703" + "sig": "e8f6db97fcad5eb2ca1cf5fc6b6d4579d026811581b0d2061af90c7cb26d966e15a06e7c596f663b05aa061308929f96136167359fc9d44919a36383403abd09" } ], "signed": { "_type": "Targets", "delegations": { - "keys": {}, - "roles": [] + "keys": { + "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ed25519", + "keyval": { + "public": "3b11296fe2dba14a2ef204e542e9e4195293bcf3042655e3d7e4ef5afe3cf36a" + } + } + }, + "roles": [ + { + "keyids": [ + "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9" + ], + "name": "role2", + "paths": [], + "terminating": false, + "threshold": 1 + } + ] }, "expires": "2030-01-01T00:00:00Z", "targets": { diff --git a/tests/repository_data/repository/metadata.staged/role1.json.gz b/tests/repository_data/repository/metadata.staged/role1.json.gz index 53d950b9bd..e2fd36f00b 100644 Binary files a/tests/repository_data/repository/metadata.staged/role1.json.gz and b/tests/repository_data/repository/metadata.staged/role1.json.gz differ diff --git a/tests/repository_data/repository/metadata.staged/role2.json b/tests/repository_data/repository/metadata.staged/role2.json new file mode 100644 index 0000000000..20b1206a70 --- /dev/null +++ b/tests/repository_data/repository/metadata.staged/role2.json @@ -0,0 +1,19 @@ +{ + "signatures": [ + { + "keyid": "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9", + "method": "ed25519", + "sig": "8fdca8154157e983d86efb16917ad973941dfa75a47d99a88b393d0955f1508aff55b66d0592ff2ad2f431d6826d6544009a921b5aae503f3f795b09ed549f0a" + } + ], + "signed": { + "_type": "Targets", + "delegations": { + "keys": {}, + "roles": [] + }, + "expires": "2030-01-01T00:00:00Z", + "targets": {}, + "version": 1 + } +} \ No newline at end of file diff --git a/tests/repository_data/repository/metadata.staged/role2.json.gz b/tests/repository_data/repository/metadata.staged/role2.json.gz new file mode 100644 index 0000000000..6074f9bdfd Binary files /dev/null and b/tests/repository_data/repository/metadata.staged/role2.json.gz differ diff --git a/tests/repository_data/repository/metadata.staged/root.json b/tests/repository_data/repository/metadata.staged/root.json index 94327a115a..ab8e8dc859 100644 --- a/tests/repository_data/repository/metadata.staged/root.json +++ b/tests/repository_data/repository/metadata.staged/root.json @@ -3,7 +3,7 @@ { "keyid": "5602f4df0cd26b2112f0833b1ce8d5fcbb595754961d3a04f37b9815e2ced503", "method": 
"RSASSA-PSS", - "sig": "3851d11ed11ea69ab5d873cfd015de79dc856d83e0a060e73d535d705da086c26191e6bc1ed6bbdde9305c3816c1c5885b48cf51c41fedc906a5ebe0e33a6b823145d40bd3e588e77c6bc724b62f4b2ca9700da03e0ba603170bfd365ea1d25ee7f9661848a14f5916869f00f3e03aa4cb468a4de647bbf205b96f9aa8dd408e3e0b1f9d53fe74654dfe139441dfe3651b3473b67bd104d754112e594a9c6ed0127e94b9057322d630f70c93c01d0cd0c2b98f6abdfd2ed7ac7dc5d3e201d191e168992574edfa935bb2a2cbaa67532c7aaddd4582b53a015c11e567d7fe7ba38cc743e7a939b9e7f2e334b48f46bdf4b82b66e639189644998d90a27847e63e8ade170f8c8aa15c8076b0af8032d78870ac18278663eddb08a7eed30c199c97c81d30bdf47d6649c7ab297120b983d9b6a1da648026d552be73bb77a9346f98a3b8db1a583b71bb706c397a3142f8194c80e62a1632152cd2ffd340605325ea39baf60fb30cf574701e5ae07efee75fc51df4f1810f3ce14345c466d25e36a3" + "sig": "3a5a1d2c4fba47117b1d297517261da9f13d6bfbdfd322bb68d5631ff456dba6446e35cd0d67cf068d1592cd80333f5566b74225dfbdc2aead60ea4ca2f79a4d4542e9d6039d9715404f07ca05145b02a53241ec30a992161777cc9154e9a8fc37cd292f6dd11af4acfc307b8b4ccb3024ad3d5409d24b91b6ae9f542d8813641f0d8d4c5a16d30f471937c2badcffc591f0e32f81755b44e8139d69042213997d459711c482a7bde2e0177ba0079a3d7cf19f825f0619c114dc88ff9eada298b7e524c727a51fbe5b9e59221f0a515931427aa662022738a03c3b1f44953e6a110e0aacbd55c328f9f0bbe97d3f6bde5fe0b3d390b5da3442ea02cc06b7b5daef31f2356283ab197d11f677c57106897b27ca2c2ada87880d906416d9de90ac1593312af726f4a43f9290b19a81d4d092be6408deb53469dabd27f1d4a16d5f306736d483a5f9b4ab820d4e8ffca2f05ba0e501062da11389da137a0aff7c8111e28269a609fe602eb1786d1732f43d6cddbe6c5847241697a7ed5f395879b4" } ], "signed": { diff --git a/tests/repository_data/repository/metadata.staged/root.json.gz b/tests/repository_data/repository/metadata.staged/root.json.gz index 4a13f7efd6..b4c12d146d 100644 Binary files a/tests/repository_data/repository/metadata.staged/root.json.gz and b/tests/repository_data/repository/metadata.staged/root.json.gz differ diff --git a/tests/repository_data/repository/metadata.staged/snapshot.json b/tests/repository_data/repository/metadata.staged/snapshot.json index d1d22bd88a..bf565452c9 100644 --- a/tests/repository_data/repository/metadata.staged/snapshot.json +++ b/tests/repository_data/repository/metadata.staged/snapshot.json @@ -3,7 +3,7 @@ { "keyid": "182216b8800c50ddf000043b31ddf90d815c754ab4e0b31a5952a839b371bed9", "method": "ed25519", - "sig": "1f81170dfee2f170f6814cb2f909f0ad0a283eb3f8b7924f41d14ea81efab2c43491aaedce30338fce483fcad32ba0bde729e9b6b44888c99401ee04a5e43302" + "sig": "9419a135b0c41fe350d712f944047661ddfa2c8b4cb141088976bc789c8ea55aba6efff78dcfa46b11790136281ae649e1e421713fbab47e274e1afd838aca03" } ], "signed": { @@ -13,9 +13,12 @@ "role1.json": { "version": 1 }, + "role2.json": { + "version": 1 + }, "root.json": { "hashes": { - "sha256": "9d0ed7fce4914cd97997c03def8c94b0aeb10aebc383e0d747a8e8257a84c8ff" + "sha256": "03843cc3b2a50d363894b2aa26e617466147355487d647abd36aba209e69a6e6" }, "length": 3329, "version": 1 diff --git a/tests/repository_data/repository/metadata.staged/snapshot.json.gz b/tests/repository_data/repository/metadata.staged/snapshot.json.gz index 51d94d42f1..88aa0f12c9 100644 Binary files a/tests/repository_data/repository/metadata.staged/snapshot.json.gz and b/tests/repository_data/repository/metadata.staged/snapshot.json.gz differ diff --git a/tests/repository_data/repository/metadata.staged/targets.json b/tests/repository_data/repository/metadata.staged/targets.json index 8c9337d88c..0620bfe42a 100644 --- a/tests/repository_data/repository/metadata.staged/targets.json +++ 
b/tests/repository_data/repository/metadata.staged/targets.json @@ -3,7 +3,7 @@ { "keyid": "a0a0f0cf08daff7afd1eb6582756d43987aa73f028044836a5519259706ca19b", "method": "ed25519", - "sig": "e96f9ca4425a37919dc91d5679c5319150b41f729389d70be7d8c8dc3dda647aa9fd11ca3c6a959c10819d652e516b375caf147721f96af329b54c0720373c06" + "sig": "74ee9970ed709ab65586ef99c0005102676a92f11e2a448bb685875b641d2efe3fd2bdefaa90e1a050bfbb34163834aadb43d13ac0c7452aa7df27c454c34507" } ], "signed": { @@ -23,7 +23,6 @@ }, "roles": [ { - "backtrack": true, "keyids": [ "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9" ], @@ -31,6 +30,7 @@ "paths": [ "/file3.txt" ], + "terminating": false, "threshold": 1 } ] diff --git a/tests/repository_data/repository/metadata.staged/targets.json.gz b/tests/repository_data/repository/metadata.staged/targets.json.gz index 44e222812c..0753ea6945 100644 Binary files a/tests/repository_data/repository/metadata.staged/targets.json.gz and b/tests/repository_data/repository/metadata.staged/targets.json.gz differ diff --git a/tests/repository_data/repository/metadata.staged/timestamp.json b/tests/repository_data/repository/metadata.staged/timestamp.json index e886fb3e80..3f44ae9773 100644 --- a/tests/repository_data/repository/metadata.staged/timestamp.json +++ b/tests/repository_data/repository/metadata.staged/timestamp.json @@ -3,7 +3,7 @@ { "keyid": "3f09f6468a522bea0364a23315872d8400875dfdb24ff4ecd32f27164d5c23c1", "method": "ed25519", - "sig": "1749729587ae99bdbaeff59562f1a06cfa3f2b8c31d0b82b8f10fcb4cc5fbf21a0df885e57994bae1b542b814cafaf506d357618515c18d8b5e4b23b90e26506" + "sig": "9a43adeef13b9d1d15f2b773e7d62b667761a65b34f2bd04c2565d842c113a85307131cfd6ae9f83d91963503783c6d98692887c322c49a6ae7b1d0869ce2105" } ], "signed": { @@ -12,9 +12,9 @@ "meta": { "snapshot.json": { "hashes": { - "sha256": "b55cf3ef997ca2d2be2ec921cdc8ab278ee77748041f9426dbe4ce1b2c8ba781" + "sha256": "c15b14217a3ad50ae2c136109983b7269d3bda42c4e1e530e30e7b5854b53a11" }, - "length": 636, + "length": 678, "version": 1 } }, diff --git a/tests/repository_data/repository/metadata.staged/timestamp.json.gz b/tests/repository_data/repository/metadata.staged/timestamp.json.gz index c855737bd9..f59476941f 100644 Binary files a/tests/repository_data/repository/metadata.staged/timestamp.json.gz and b/tests/repository_data/repository/metadata.staged/timestamp.json.gz differ diff --git a/tests/repository_data/repository/metadata/1.root.json b/tests/repository_data/repository/metadata/1.root.json new file mode 100644 index 0000000000..94327a115a --- /dev/null +++ b/tests/repository_data/repository/metadata/1.root.json @@ -0,0 +1,86 @@ +{ + "signatures": [ + { + "keyid": "5602f4df0cd26b2112f0833b1ce8d5fcbb595754961d3a04f37b9815e2ced503", + "method": "RSASSA-PSS", + "sig": 
"3851d11ed11ea69ab5d873cfd015de79dc856d83e0a060e73d535d705da086c26191e6bc1ed6bbdde9305c3816c1c5885b48cf51c41fedc906a5ebe0e33a6b823145d40bd3e588e77c6bc724b62f4b2ca9700da03e0ba603170bfd365ea1d25ee7f9661848a14f5916869f00f3e03aa4cb468a4de647bbf205b96f9aa8dd408e3e0b1f9d53fe74654dfe139441dfe3651b3473b67bd104d754112e594a9c6ed0127e94b9057322d630f70c93c01d0cd0c2b98f6abdfd2ed7ac7dc5d3e201d191e168992574edfa935bb2a2cbaa67532c7aaddd4582b53a015c11e567d7fe7ba38cc743e7a939b9e7f2e334b48f46bdf4b82b66e639189644998d90a27847e63e8ade170f8c8aa15c8076b0af8032d78870ac18278663eddb08a7eed30c199c97c81d30bdf47d6649c7ab297120b983d9b6a1da648026d552be73bb77a9346f98a3b8db1a583b71bb706c397a3142f8194c80e62a1632152cd2ffd340605325ea39baf60fb30cf574701e5ae07efee75fc51df4f1810f3ce14345c466d25e36a3" + } + ], + "signed": { + "_type": "Root", + "compression_algorithms": [ + "gz" + ], + "consistent_snapshot": false, + "expires": "2030-01-01T00:00:00Z", + "keys": { + "182216b8800c50ddf000043b31ddf90d815c754ab4e0b31a5952a839b371bed9": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ed25519", + "keyval": { + "public": "e8bd29d20025d3ac755a27b8d8efe185e368244000d9d3b76e32afb968cb0ea8" + } + }, + "3f09f6468a522bea0364a23315872d8400875dfdb24ff4ecd32f27164d5c23c1": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ed25519", + "keyval": { + "public": "52de9284608be6b718a9d88c7c5d8d9d93b33732e00d670dd4ebe4bce8bbc83c" + } + }, + "5602f4df0cd26b2112f0833b1ce8d5fcbb595754961d3a04f37b9815e2ced503": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "rsa", + "keyval": { + "public": "-----BEGIN PUBLIC KEY-----\nMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAsDqUoiFJZX+5gm5pyI1l\nVc/N3yjJVOIl9GyiK0mRyzV3IzUQzhjq8nhk0eLfzXw2XwIAYOJC6dR/tGRG4JDx\nJkez5FFH4zLosr/XzT7CG5zxJ3kKICLD1v9rZQr5ZgARQDOpkxzPz46rGnE0sHd7\nMpnpPMScA1pMIzwM1RoPS4ntZipI1cl9M7HMQ6mkBp8/DNKCqaDWixJqaGgWrhhK\nhI/1mzBliMKriNxPKSCGVlOk/QpZft+y1fs42s0DMd5BOFBo+ZcoXLYRncg9S3A2\nxx/jT69Bt3ceiAZqnp7f6M+ZzoUifSelaoL7QIYg/GkEl+0oxTD0yRphGiCKwn9c\npSbn7NgnbjqSgIMeEtlf/5Coyrs26pyFf/9GbusddPSxxxwIJ/7IJuF7P1Yy0WpZ\nkMeY83h9n2IdnEYi+rpdbLJPQd7Fpu2xrdA3Fokj8AvCpcmxn8NIXZuK++r8/xsE\nAUL30HH7dgVn50AvdPaJnqAORT3OlabW0DK9prcwKnyzAgMBAAE=\n-----END PUBLIC KEY-----" + } + }, + "a0a0f0cf08daff7afd1eb6582756d43987aa73f028044836a5519259706ca19b": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ed25519", + "keyval": { + "public": "0692a846935833d685168ae8c98fee951d52d8aa76685b8ba55b8e1eada217c2" + } + } + }, + "roles": { + "root": { + "keyids": [ + "5602f4df0cd26b2112f0833b1ce8d5fcbb595754961d3a04f37b9815e2ced503" + ], + "threshold": 1 + }, + "snapshot": { + "keyids": [ + "182216b8800c50ddf000043b31ddf90d815c754ab4e0b31a5952a839b371bed9" + ], + "threshold": 1 + }, + "targets": { + "keyids": [ + "a0a0f0cf08daff7afd1eb6582756d43987aa73f028044836a5519259706ca19b" + ], + "threshold": 1 + }, + "timestamp": { + "keyids": [ + "3f09f6468a522bea0364a23315872d8400875dfdb24ff4ecd32f27164d5c23c1" + ], + "threshold": 1 + } + }, + "version": 1 + } +} \ No newline at end of file diff --git a/tests/repository_data/repository/metadata/1.root.json.gz b/tests/repository_data/repository/metadata/1.root.json.gz new file mode 100644 index 0000000000..4a13f7efd6 Binary files /dev/null and b/tests/repository_data/repository/metadata/1.root.json.gz differ diff --git a/tests/repository_data/repository/metadata/role1.json b/tests/repository_data/repository/metadata/role1.json index 37b378d3e2..96d921b5eb 100644 --- 
a/tests/repository_data/repository/metadata/role1.json +++ b/tests/repository_data/repository/metadata/role1.json @@ -3,14 +3,35 @@ { "keyid": "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9", "method": "ed25519", - "sig": "98c7634186f7a02b3a56d8204e62b3a1d25225935dc47c720426ef591d09931e071f96f8d47ef3ec814dd7278f05c01190e60386ad03e546869c7aeeb3249703" + "sig": "e8f6db97fcad5eb2ca1cf5fc6b6d4579d026811581b0d2061af90c7cb26d966e15a06e7c596f663b05aa061308929f96136167359fc9d44919a36383403abd09" } ], "signed": { "_type": "Targets", "delegations": { - "keys": {}, - "roles": [] + "keys": { + "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9": { + "keyid_hash_algorithms": [ + "sha256", + "sha512" + ], + "keytype": "ed25519", + "keyval": { + "public": "3b11296fe2dba14a2ef204e542e9e4195293bcf3042655e3d7e4ef5afe3cf36a" + } + } + }, + "roles": [ + { + "keyids": [ + "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9" + ], + "name": "role2", + "paths": [], + "terminating": false, + "threshold": 1 + } + ] }, "expires": "2030-01-01T00:00:00Z", "targets": { diff --git a/tests/repository_data/repository/metadata/role1.json.gz b/tests/repository_data/repository/metadata/role1.json.gz index 53d950b9bd..e2fd36f00b 100644 Binary files a/tests/repository_data/repository/metadata/role1.json.gz and b/tests/repository_data/repository/metadata/role1.json.gz differ diff --git a/tests/repository_data/repository/metadata/role2.json b/tests/repository_data/repository/metadata/role2.json new file mode 100644 index 0000000000..20b1206a70 --- /dev/null +++ b/tests/repository_data/repository/metadata/role2.json @@ -0,0 +1,19 @@ +{ + "signatures": [ + { + "keyid": "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9", + "method": "ed25519", + "sig": "8fdca8154157e983d86efb16917ad973941dfa75a47d99a88b393d0955f1508aff55b66d0592ff2ad2f431d6826d6544009a921b5aae503f3f795b09ed549f0a" + } + ], + "signed": { + "_type": "Targets", + "delegations": { + "keys": {}, + "roles": [] + }, + "expires": "2030-01-01T00:00:00Z", + "targets": {}, + "version": 1 + } +} \ No newline at end of file diff --git a/tests/repository_data/repository/metadata/role2.json.gz b/tests/repository_data/repository/metadata/role2.json.gz new file mode 100644 index 0000000000..6074f9bdfd Binary files /dev/null and b/tests/repository_data/repository/metadata/role2.json.gz differ diff --git a/tests/repository_data/repository/metadata/root.json b/tests/repository_data/repository/metadata/root.json index 94327a115a..ab8e8dc859 100644 --- a/tests/repository_data/repository/metadata/root.json +++ b/tests/repository_data/repository/metadata/root.json @@ -3,7 +3,7 @@ { "keyid": "5602f4df0cd26b2112f0833b1ce8d5fcbb595754961d3a04f37b9815e2ced503", "method": "RSASSA-PSS", - "sig": 
"3851d11ed11ea69ab5d873cfd015de79dc856d83e0a060e73d535d705da086c26191e6bc1ed6bbdde9305c3816c1c5885b48cf51c41fedc906a5ebe0e33a6b823145d40bd3e588e77c6bc724b62f4b2ca9700da03e0ba603170bfd365ea1d25ee7f9661848a14f5916869f00f3e03aa4cb468a4de647bbf205b96f9aa8dd408e3e0b1f9d53fe74654dfe139441dfe3651b3473b67bd104d754112e594a9c6ed0127e94b9057322d630f70c93c01d0cd0c2b98f6abdfd2ed7ac7dc5d3e201d191e168992574edfa935bb2a2cbaa67532c7aaddd4582b53a015c11e567d7fe7ba38cc743e7a939b9e7f2e334b48f46bdf4b82b66e639189644998d90a27847e63e8ade170f8c8aa15c8076b0af8032d78870ac18278663eddb08a7eed30c199c97c81d30bdf47d6649c7ab297120b983d9b6a1da648026d552be73bb77a9346f98a3b8db1a583b71bb706c397a3142f8194c80e62a1632152cd2ffd340605325ea39baf60fb30cf574701e5ae07efee75fc51df4f1810f3ce14345c466d25e36a3" + "sig": "3a5a1d2c4fba47117b1d297517261da9f13d6bfbdfd322bb68d5631ff456dba6446e35cd0d67cf068d1592cd80333f5566b74225dfbdc2aead60ea4ca2f79a4d4542e9d6039d9715404f07ca05145b02a53241ec30a992161777cc9154e9a8fc37cd292f6dd11af4acfc307b8b4ccb3024ad3d5409d24b91b6ae9f542d8813641f0d8d4c5a16d30f471937c2badcffc591f0e32f81755b44e8139d69042213997d459711c482a7bde2e0177ba0079a3d7cf19f825f0619c114dc88ff9eada298b7e524c727a51fbe5b9e59221f0a515931427aa662022738a03c3b1f44953e6a110e0aacbd55c328f9f0bbe97d3f6bde5fe0b3d390b5da3442ea02cc06b7b5daef31f2356283ab197d11f677c57106897b27ca2c2ada87880d906416d9de90ac1593312af726f4a43f9290b19a81d4d092be6408deb53469dabd27f1d4a16d5f306736d483a5f9b4ab820d4e8ffca2f05ba0e501062da11389da137a0aff7c8111e28269a609fe602eb1786d1732f43d6cddbe6c5847241697a7ed5f395879b4" } ], "signed": { diff --git a/tests/repository_data/repository/metadata/root.json.gz b/tests/repository_data/repository/metadata/root.json.gz index 4a13f7efd6..b4c12d146d 100644 Binary files a/tests/repository_data/repository/metadata/root.json.gz and b/tests/repository_data/repository/metadata/root.json.gz differ diff --git a/tests/repository_data/repository/metadata/snapshot.json b/tests/repository_data/repository/metadata/snapshot.json index d1d22bd88a..bf565452c9 100644 --- a/tests/repository_data/repository/metadata/snapshot.json +++ b/tests/repository_data/repository/metadata/snapshot.json @@ -3,7 +3,7 @@ { "keyid": "182216b8800c50ddf000043b31ddf90d815c754ab4e0b31a5952a839b371bed9", "method": "ed25519", - "sig": "1f81170dfee2f170f6814cb2f909f0ad0a283eb3f8b7924f41d14ea81efab2c43491aaedce30338fce483fcad32ba0bde729e9b6b44888c99401ee04a5e43302" + "sig": "9419a135b0c41fe350d712f944047661ddfa2c8b4cb141088976bc789c8ea55aba6efff78dcfa46b11790136281ae649e1e421713fbab47e274e1afd838aca03" } ], "signed": { @@ -13,9 +13,12 @@ "role1.json": { "version": 1 }, + "role2.json": { + "version": 1 + }, "root.json": { "hashes": { - "sha256": "9d0ed7fce4914cd97997c03def8c94b0aeb10aebc383e0d747a8e8257a84c8ff" + "sha256": "03843cc3b2a50d363894b2aa26e617466147355487d647abd36aba209e69a6e6" }, "length": 3329, "version": 1 diff --git a/tests/repository_data/repository/metadata/snapshot.json.gz b/tests/repository_data/repository/metadata/snapshot.json.gz index 51d94d42f1..88aa0f12c9 100644 Binary files a/tests/repository_data/repository/metadata/snapshot.json.gz and b/tests/repository_data/repository/metadata/snapshot.json.gz differ diff --git a/tests/repository_data/repository/metadata/targets.json b/tests/repository_data/repository/metadata/targets.json index 8c9337d88c..0620bfe42a 100644 --- a/tests/repository_data/repository/metadata/targets.json +++ b/tests/repository_data/repository/metadata/targets.json @@ -3,7 +3,7 @@ { "keyid": 
"a0a0f0cf08daff7afd1eb6582756d43987aa73f028044836a5519259706ca19b", "method": "ed25519", - "sig": "e96f9ca4425a37919dc91d5679c5319150b41f729389d70be7d8c8dc3dda647aa9fd11ca3c6a959c10819d652e516b375caf147721f96af329b54c0720373c06" + "sig": "74ee9970ed709ab65586ef99c0005102676a92f11e2a448bb685875b641d2efe3fd2bdefaa90e1a050bfbb34163834aadb43d13ac0c7452aa7df27c454c34507" } ], "signed": { @@ -23,7 +23,6 @@ }, "roles": [ { - "backtrack": true, "keyids": [ "a0650f29dde63403cc4eec28a1c66f2262d6339434a01c63a881a48bedd9bca9" ], @@ -31,6 +30,7 @@ "paths": [ "/file3.txt" ], + "terminating": false, "threshold": 1 } ] diff --git a/tests/repository_data/repository/metadata/targets.json.gz b/tests/repository_data/repository/metadata/targets.json.gz index 44e222812c..0753ea6945 100644 Binary files a/tests/repository_data/repository/metadata/targets.json.gz and b/tests/repository_data/repository/metadata/targets.json.gz differ diff --git a/tests/repository_data/repository/metadata/timestamp.json b/tests/repository_data/repository/metadata/timestamp.json index e886fb3e80..3f44ae9773 100644 --- a/tests/repository_data/repository/metadata/timestamp.json +++ b/tests/repository_data/repository/metadata/timestamp.json @@ -3,7 +3,7 @@ { "keyid": "3f09f6468a522bea0364a23315872d8400875dfdb24ff4ecd32f27164d5c23c1", "method": "ed25519", - "sig": "1749729587ae99bdbaeff59562f1a06cfa3f2b8c31d0b82b8f10fcb4cc5fbf21a0df885e57994bae1b542b814cafaf506d357618515c18d8b5e4b23b90e26506" + "sig": "9a43adeef13b9d1d15f2b773e7d62b667761a65b34f2bd04c2565d842c113a85307131cfd6ae9f83d91963503783c6d98692887c322c49a6ae7b1d0869ce2105" } ], "signed": { @@ -12,9 +12,9 @@ "meta": { "snapshot.json": { "hashes": { - "sha256": "b55cf3ef997ca2d2be2ec921cdc8ab278ee77748041f9426dbe4ce1b2c8ba781" + "sha256": "c15b14217a3ad50ae2c136109983b7269d3bda42c4e1e530e30e7b5854b53a11" }, - "length": 636, + "length": 678, "version": 1 } }, diff --git a/tests/repository_data/repository/metadata/timestamp.json.gz b/tests/repository_data/repository/metadata/timestamp.json.gz index c855737bd9..f59476941f 100644 Binary files a/tests/repository_data/repository/metadata/timestamp.json.gz and b/tests/repository_data/repository/metadata/timestamp.json.gz differ diff --git a/tests/repository_tool.py b/tests/repository_tool.py deleted file mode 100755 index 042adea63d..0000000000 --- a/tests/repository_tool.py +++ /dev/null @@ -1,2866 +0,0 @@ -#!/usr/bin/env python - -""" - - repository_tool.py - - - Vladimir Diaz - - - October 19, 2013 - - - See LICENSE for licensing information. - - - Provide a tool that can create a TUF repository. It can be used with the - Python interpreter in interactive mode, or imported directly into a Python - module. See 'tuf/README' for the complete guide to using - 'tuf.repository_tool.py'. -""" - -# Help with Python 3 compatibility, where the print statement is a function, an -# implicit relative import is invalid, and the '/' operator performs true -# division. Example: print 'hello world' raises a 'SyntaxError' exception. 
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import unicode_literals
-
-import os
-import errno
-import time
-import datetime
-import logging
-import tempfile
-import shutil
-import json
-import random
-
-import tuf
-import tuf.formats
-import tuf.util
-import tuf.keydb
-import tuf.roledb
-import tuf.keys
-import tuf.sig
-import tuf.log
-import tuf.conf
-import tuf.repository_lib as repo_lib
-from tuf.repository_lib import generate_and_write_rsa_keypair
-from tuf.repository_lib import generate_and_write_ed25519_keypair
-from tuf.repository_lib import import_rsa_publickey_from_file
-from tuf.repository_lib import import_ed25519_publickey_from_file
-from tuf.repository_lib import import_rsa_privatekey_from_file
-from tuf.repository_lib import import_ed25519_privatekey_from_file
-from tuf.repository_lib import create_tuf_client_directory
-from tuf.repository_lib import disable_console_log_messages
-
-import iso8601
-import six
-
-
-# See 'log.py' to learn how logging is handled in TUF.
-logger = logging.getLogger('tuf.repository_tool')
-
-# Add a console handler so that users are aware of potentially unintended
-# states, such as multiple roles that share keys.
-tuf.log.add_console_handler()
-tuf.log.set_console_log_level(logging.INFO)
-
-# The algorithm used by the repository to generate the digests of the
-# target filepaths, which are included in metadata files and may be prepended
-# to the filenames of consistent snapshots.
-HASH_FUNCTION = 'sha256'
-
-# The targets and metadata directory names. Metadata files are written
-# to the staged metadata directory instead of the "live" one.
-METADATA_STAGED_DIRECTORY_NAME = 'metadata.staged'
-METADATA_DIRECTORY_NAME = 'metadata'
-TARGETS_DIRECTORY_NAME = 'targets'
-
-# The extension of TUF metadata.
-METADATA_EXTENSION = '.json'
-
-# Expiration date delta, in seconds, of the top-level roles. A metadata
-# expiration date is set by taking the current time and adding the expiration
-# seconds listed below.
-
-# Initial 'root.json' expiration time of 1 year.
-ROOT_EXPIRATION = 31556900
-
-# Initial 'targets.json' expiration time of 3 months.
-TARGETS_EXPIRATION = 7889230
-
-# Initial 'snapshot.json' expiration time of 1 week.
-SNAPSHOT_EXPIRATION = 604800
-
-# Initial 'timestamp.json' expiration time of 1 day.
-TIMESTAMP_EXPIRATION = 86400
-
-try:
-  tuf.keys.check_crypto_libraries(['rsa', 'ed25519', 'general'])
-
-except tuf.UnsupportedLibraryError: #pragma: no cover
-  logger.warn('Warning: The repository and developer tools require'
-    ' additional libraries, which can be installed as follows:'
-    '\n $ pip install tuf[tools]')
-
-
-class Repository(object):
-  """
-  <Purpose>
-    Represent a TUF repository that contains the metadata of the top-level
-    roles, including all those delegated from the 'targets.json' role. The
-    repository object returned provides access to the top-level roles, and any
-    delegated targets that are added as the repository is modified. For
-    example, a Repository object named 'repository' provides the following
-    access by default:
-
-    repository.root.version = 2
-    repository.timestamp.expiration = datetime.datetime(2015, 8, 8, 12, 0)
-    repository.snapshot.add_verification_key(...)
-    repository.targets.delegate('unclaimed', ...)
-
-    Delegating a role from 'targets' updates the attributes of the parent
-    delegation, which then provides:
-
-    repository.targets('unclaimed').add_verification_key(...)
-
-
-    repository_directory:
-      The root folder of the repository that contains the metadata and targets
-      sub-directories.
-
-    metadata_directory:
-      The metadata sub-directory contains the files of the top-level
-      roles, including all roles delegated from 'targets.json'.
-
-    targets_directory:
-      The targets sub-directory contains all the target files that are
-      downloaded by clients and are referenced in TUF Metadata. The hashes and
-      file lengths are listed in Metadata files so that they are securely
-      downloaded. Metadata files are similarly referenced in the top-level
-      metadata.
-
-    tuf.FormatError, if the arguments are improperly formatted.
-
-    Creates top-level role objects and assigns them as attributes.
-
-    A Repository object that contains default Metadata objects for the top-level
-    roles.
-  """
-
-  def __init__(self, repository_directory, metadata_directory, targets_directory):
-
-    # Do the arguments have the correct format?
-    # Ensure the arguments have the appropriate number of objects and object
-    # types, and that all dict keys are properly named.
-    # Raise 'tuf.FormatError' if any are improperly formatted.
-    tuf.formats.PATH_SCHEMA.check_match(repository_directory)
-    tuf.formats.PATH_SCHEMA.check_match(metadata_directory)
-    tuf.formats.PATH_SCHEMA.check_match(targets_directory)
-
-    self._repository_directory = repository_directory
-    self._metadata_directory = metadata_directory
-    self._targets_directory = targets_directory
-
-    # Set the top-level role objects.
-    self.root = Root()
-    self.snapshot = Snapshot()
-    self.timestamp = Timestamp()
-    self.targets = Targets(self._targets_directory, 'targets')
-
-
-
-  def write(self, write_partial=False, consistent_snapshot=False,
-            compression_algorithms=['gz']):
-    """
-
-      Write all the JSON Metadata objects to their corresponding files.
-      write() raises an exception if any of the role metadata to be written to
-      disk is invalid, such as an insufficient threshold of signatures, missing
-      private keys, etc.
-
-
-      write_partial:
-        A boolean indicating whether partial metadata should be written to
-        disk. Partial metadata may be written to allow multiple maintainers
-        to independently sign and update role metadata. write() raises an
-        exception if a metadata role cannot be written due to not having enough
-        signatures.
-
-      consistent_snapshot:
-        A boolean indicating whether written metadata and target files should
-        include a version number or digest in the filename (i.e.,
-        <version_number>.root.json, <version_number>.targets.json.gz,
-        <digest>.README.json, where <digest> is the file's
-        SHA256 digest). Example: 13.root.json
-
-      compression_algorithms:
-        A list of compression algorithms. Each of these algorithms will be
-        used to compress all of the metadata available on the repository.
-        By default, all metadata is compressed with gzip.
-
-
-      tuf.UnsignedMetadataError, if any of the top-level and delegated roles do
-      not have the minimum threshold of signatures.
-
-
-      Creates metadata files in the repository's metadata directory.
-
-
-      None.
-    """
-
-    # Does 'write_partial' have the correct format?
-    # Ensure the arguments have the appropriate number of objects and object
-    # types, and that all dict keys are properly named.
-    # Raise 'tuf.FormatError' if any are improperly formatted.
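-    #
-    # Illustrative only (not executed here): a caller with a fully keyed and
-    # signed repository object might invoke, e.g.,
-    #
-    #   repository.write(consistent_snapshot=True,
-    #                    compression_algorithms=['gz'])
-    #
-    # which would also produce versioned filenames such as '13.root.json' and
-    # compressed copies such as 'targets.json.gz'.
-    #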
- tuf.formats.BOOLEAN_SCHEMA.check_match(write_partial) - tuf.formats.BOOLEAN_SCHEMA.check_match(consistent_snapshot) - tuf.formats.COMPRESSIONS_SCHEMA.check_match(compression_algorithms) - - # At this point the tuf.keydb and tuf.roledb stores must be fully - # populated, otherwise write() throws a 'tuf.UnsignedMetadataError' - # exception if any of the top-level roles are missing signatures, keys, etc. - - # Write the metadata files of all the delegated roles that are dirty (i.e., - # have been modified via roledb.update_roleinfo()). - for delegated_rolename in tuf.roledb.get_dirty_roles(): - - # Ignore top-level roles, they will be generated later on in this method. - if delegated_rolename in ['root', 'targets', 'snapshot', 'timestamp']: - continue - - delegated_filename = os.path.join(self._metadata_directory, - delegated_rolename + METADATA_EXTENSION) - - repo_lib._generate_and_write_metadata(delegated_rolename, - delegated_filename, - write_partial, - self._targets_directory, - self._metadata_directory, - consistent_snapshot) - - # Generate the 'root.json' metadata file. - # _generate_and_write_metadata() raises a 'tuf.Error' exception if the - # metadata cannot be written. - root_filename = repo_lib.ROOT_FILENAME - root_filename = os.path.join(self._metadata_directory, root_filename) - - signable_junk, root_filename = \ - repo_lib._generate_and_write_metadata('root', root_filename, write_partial, - self._targets_directory, - self._metadata_directory, - consistent_snapshot) - - # Generate the 'targets.json' metadata file. - targets_filename = repo_lib.TARGETS_FILENAME - targets_filename = os.path.join(self._metadata_directory, targets_filename) - - signable_junk, targets_filename = \ - repo_lib._generate_and_write_metadata('targets', targets_filename, - write_partial, - self._targets_directory, - self._metadata_directory, - consistent_snapshot) - - # Generate the 'snapshot.json' metadata file. - snapshot_filename = repo_lib.SNAPSHOT_FILENAME - snapshot_filename = os.path.join(self._metadata_directory, snapshot_filename) - filenames = {'root': root_filename, 'targets': targets_filename} - snapshot_signable = None - - snapshot_signable, snapshot_filename = \ - repo_lib._generate_and_write_metadata('snapshot', snapshot_filename, - write_partial, - self._targets_directory, - self._metadata_directory, - consistent_snapshot, filenames) - - # Generate the 'timestamp.json' metadata file. - timestamp_filename = repo_lib.TIMESTAMP_FILENAME - timestamp_filename = os.path.join(self._metadata_directory, timestamp_filename) - filenames = {'snapshot': snapshot_filename} - - repo_lib._generate_and_write_metadata('timestamp', timestamp_filename, - write_partial, - self._targets_directory, - self._metadata_directory, - consistent_snapshot, filenames) - - # Delete the metadata of roles no longer in 'tuf.roledb'. Obsolete roles - # may have been revoked and should no longer have their metadata files - # available on disk, otherwise loading a repository may unintentionally load - # them. - repo_lib._delete_obsolete_metadata(self._metadata_directory, - snapshot_signable['signed'], - consistent_snapshot) - - - - def write_partial(self): - """ - - Write all the JSON Metadata objects to their corresponding files, but - allow metadata files to contain an invalid threshold of signatures. - - - None. - - - None. - - - Creates metadata files in the repository's metadata directory. - - - None. 
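-
-      Example (illustrative; this method is equivalent to calling
-      write(write_partial=True) on a repository that does not yet hold a full
-      threshold of signatures):
-
-        repository.write_partial()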
- """ - - self.write(write_partial=True) - - - - def status(self): - """ - - Determine the status of the top-level roles, including those delegated by - the Targets role. status() checks if each role provides sufficient public - and private keys, signatures, and that a valid metadata file is generated - if write() were to be called. Metadata files are temporarily written so - that file hashes and lengths may be verified, determine if delegated role - trust is fully obeyed, and target paths valid according to parent roles. - status() does not do a simple check for number of threshold keys and - signatures. - - - None. - - - None. - - - Generates and writes temporary metadata files. - - - None. - """ - - temp_repository_directory = None - - # Generate and write temporary metadata so that full verification of - # metadata is possible, such as verifying signatures, digests, and file - # content. Ensure temporary files generated are removed after verification - # results are completed. - try: - temp_repository_directory = tempfile.mkdtemp() - targets_directory = self._targets_directory - metadata_directory = os.path.join(temp_repository_directory, - METADATA_STAGED_DIRECTORY_NAME) - os.mkdir(metadata_directory) - - - # Retrieve the roleinfo of the delegated roles, exluding the top-level - # targets role. - delegated_roles = tuf.roledb.get_delegated_rolenames('targets') - insufficient_keys = [] - insufficient_signatures = [] - - # Iterate the list of delegated roles and determine the list of invalid - # roles. First verify the public and private keys, and then the generated - # metadata file. - for delegated_role in delegated_roles: - filename = delegated_role + METADATA_EXTENSION - filename = os.path.join(metadata_directory, filename) - - # Ensure the parent directories of 'filename' exist, otherwise an - # IO exception is raised if 'filename' is written to a sub-directory. - tuf.util.ensure_parent_dir(filename) - - # Append any invalid roles to the 'insufficient_keys' and - # 'insufficient_signatures' lists - try: - repo_lib._check_role_keys(delegated_role) - - except tuf.InsufficientKeysError: - insufficient_keys.append(delegated_role) - continue - - try: - repo_lib._generate_and_write_metadata(delegated_role, filename, False, - targets_directory, - metadata_directory) - except tuf.UnsignedMetadataError: - insufficient_signatures.append(delegated_role) - - # Log the verification results of the delegated roles and return - # immediately after each invalid case. - if len(insufficient_keys): - logger.info('Delegated roles with insufficient' - ' keys:\n' + repr(insufficient_keys)) - return - - if len(insufficient_signatures): - logger.info('Delegated roles with insufficient' - ' signatures:\n' + repr(insufficient_signatures)) - return - - # Verify the top-level roles and log the results. - repo_lib._log_status_of_top_level_roles(targets_directory, - metadata_directory) - - finally: - shutil.rmtree(temp_repository_directory, ignore_errors=True) - - - @staticmethod - def get_filepaths_in_directory(files_directory, recursive_walk=False, - followlinks=True): - """ - - Walk the given 'files_directory' and build a list of target files found. - - - files_directory: - The path to a directory of target files. - - recursive_walk: - To recursively walk the directory, set recursive_walk=True. - - followlinks: - To follow symbolic links, set followlinks=True. - - - tuf.FormatError, if the arguments are improperly formatted. - - tuf.Error, if 'file_directory' is not a valid directory. - - Python IO exceptions. 
- - - None. - - - A list of absolute paths to target files in the given 'files_directory'. - """ - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'tuf.FormatError' if any are improperly formatted. - tuf.formats.PATH_SCHEMA.check_match(files_directory) - tuf.formats.BOOLEAN_SCHEMA.check_match(recursive_walk) - tuf.formats.BOOLEAN_SCHEMA.check_match(followlinks) - - # Ensure a valid directory is given. - if not os.path.isdir(files_directory): - raise tuf.Error(repr(files_directory) + ' is not a directory.') - - # A list of the target filepaths found in 'files_directory'. - targets = [] - - # FIXME: We need a way to tell Python 2, but not Python 3, to return - # filenames in Unicode; see #61 and: - # http://docs.python.org/2/howto/unicode.html#unicode-filenames - for dirpath, dirnames, filenames in os.walk(files_directory, - followlinks=followlinks): - for filename in filenames: - full_target_path = os.path.join(dirpath, filename) - targets.append(full_target_path) - - # Prune the subdirectories to walk right now if we do not wish to - # recursively walk files_directory. - if recursive_walk is False: - del dirnames[:] - - return targets - - - - - -class Metadata(object): - """ - - Provide a base class to represent a TUF Metadata role. There are four - top-level roles: Root, Targets, Snapshot, and Timestamp. The Metadata class - provides methods that are needed by all top-level roles, such as adding - and removing public keys, private keys, and signatures. Metadata - attributes, such as rolename, version, threshold, expiration, key list, and - compressions, is also provided by the Metadata base class. - - - None. - - - None. - - - None. - - - None. - """ - - def __init__(self): - self._rolename = None - - - - def add_verification_key(self, key): - """ - - Add 'key' to the role. Adding a key, which should contain only the public - portion, signifies the corresponding private key and signatures the role - is expected to provide. A threshold of signatures is required for a role - to be considered properly signed. If a metadata file contains an - insufficient threshold of signatures, it must not be accepted. - - >>> - >>> - >>> - - - key: - The role key to be added, conformant to 'tuf.formats.ANYKEY_SCHEMA'. - Adding a public key to a role means that its corresponding private key - must generate and add its signature to the role. A threshold number of - signatures is required for a role to be fully signed. - - - tuf.FormatError, if the 'key' argument is improperly formatted. - - - The role's entries in 'tuf.keydb.py' and 'tuf.roledb.py' are updated. - - - None. - """ - - # Does 'key' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'tuf.FormatError' if any are improperly formatted. - tuf.formats.ANYKEY_SCHEMA.check_match(key) - - # Ensure 'key', which should contain the public portion, is added to - # 'tuf.keydb.py'. Add 'key' to the list of recognized keys. Keys may be - # shared, so do not raise an exception if 'key' has already been loaded. - try: - tuf.keydb.add_key(key) - - except tuf.KeyAlreadyExistsError: - logger.warning('Adding a verification key that has already been used.') - - keyid = key['keyid'] - roleinfo = tuf.roledb.get_roleinfo(self.rolename) - - # Add 'key' to the role's entry in 'tuf.roledb.py' and avoid duplicates. 
- if keyid not in roleinfo['keyids']: - roleinfo['keyids'].append(keyid) - - tuf.roledb.update_roleinfo(self._rolename, roleinfo) - - - - def remove_verification_key(self, key): - """ - - Remove 'key' from the role's currently recognized list of role keys. - The role expects a threshold number of signatures. - - >>> - >>> - >>> - - - key: - The role's key, conformant to 'tuf.formats.ANYKEY_SCHEMA'. 'key' - should contain only the public portion, as only the public key is - needed. The 'add_verification_key()' method should have previously - added 'key'. - - - tuf.FormatError, if the 'key' argument is improperly formatted. - - tuf.Error, if the 'key' argument has not been previously added. - - - Updates the role's 'tuf.roledb.py' entry. - - - None. - """ - - # Does 'key' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'tuf.FormatError' if any are improperly formatted. - tuf.formats.ANYKEY_SCHEMA.check_match(key) - - keyid = key['keyid'] - roleinfo = tuf.roledb.get_roleinfo(self.rolename) - - if keyid in roleinfo['keyids']: - roleinfo['keyids'].remove(keyid) - - tuf.roledb.update_roleinfo(self._rolename, roleinfo) - - else: - raise tuf.Error('Verification key not found.') - - - - def load_signing_key(self, key): - """ - - Load the role key, which must contain the private portion, so that role - signatures may be generated when the role's metadata file is eventually - written to disk. - - >>> - >>> - >>> - - - key: - The role's key, conformant to 'tuf.formats.ANYKEY_SCHEMA'. It must - contain the private key, so that role signatures may be generated when - write() or write_partial() is eventually called to generate valid - metadata files. - - - tuf.FormatError, if 'key' is improperly formatted. - - tuf.Error, if the private key is not found in 'key'. - - - Updates the role's 'tuf.keydb.py' and 'tuf.roledb.py' entries. - - - None. - """ - - # Does 'key' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'tuf.FormatError' if any are improperly formatted. - tuf.formats.ANYKEY_SCHEMA.check_match(key) - - # Ensure the private portion of the key is available, otherwise signatures - # cannot be generated when the metadata file is written to disk. - if not len(key['keyval']['private']): - raise tuf.Error('This is not a private key.') - - # Has the key, with the private portion included, been added to the keydb? - # The public version of the key may have been previously added. - try: - tuf.keydb.add_key(key) - - except tuf.KeyAlreadyExistsError: - tuf.keydb.remove_key(key['keyid']) - tuf.keydb.add_key(key) - - # Update the role's 'signing_keys' field in 'tuf.roledb.py'. - roleinfo = tuf.roledb.get_roleinfo(self.rolename) - if key['keyid'] not in roleinfo['signing_keyids']: - roleinfo['signing_keyids'].append(key['keyid']) - - tuf.roledb.update_roleinfo(self.rolename, roleinfo) - - - - def unload_signing_key(self, key): - """ - - Remove a previously loaded role private key (i.e., load_signing_key()). - The keyid of the 'key' is removed from the list of recognized signing - keys. - - >>> - >>> - >>> - - - key: - The role key to be unloaded, conformant to 'tuf.formats.ANYKEY_SCHEMA'. - - - tuf.FormatError, if the 'key' argument is improperly formatted. - - tuf.Error, if the 'key' argument has not been previously loaded. 
-
-
-      Updates the signing keys of the role in 'tuf.roledb.py'.
-
-      None.
-    """
-
-    # Does 'key' have the correct format?
-    # Ensure the arguments have the appropriate number of objects and object
-    # types, and that all dict keys are properly named.
-    # Raise 'tuf.FormatError' if any are improperly formatted.
-    tuf.formats.ANYKEY_SCHEMA.check_match(key)
-
-    # Update the role's 'signing_keys' field in 'tuf.roledb.py'.
-    roleinfo = tuf.roledb.get_roleinfo(self.rolename)
-
-    if key['keyid'] in roleinfo['signing_keyids']:
-      roleinfo['signing_keyids'].remove(key['keyid'])
-
-      tuf.roledb.update_roleinfo(self.rolename, roleinfo)
-
-    else:
-      raise tuf.Error('Signing key not found.')
-
-
-
-  def add_signature(self, signature):
-    """
-
-      Add a signature to the role. A role is considered fully signed if it
-      contains a threshold of signatures. The 'signature' should have been
-      generated by the private key corresponding to one of the role's expected
-      keys.
-
-    >>>
-    >>>
-    >>>
-
-
-      signature:
-        The signature to be added to the role, conformant to
-        'tuf.formats.SIGNATURE_SCHEMA'.
-
-
-      tuf.FormatError, if the 'signature' argument is improperly formatted.
-
-
-      Adds 'signature', if not already added, to the role's 'signatures' field
-      in 'tuf.roledb.py'.
-
-
-      None.
-    """
-
-    # Does 'signature' have the correct format?
-    # Ensure the arguments have the appropriate number of objects and object
-    # types, and that all dict keys are properly named.
-    # Raise 'tuf.FormatError' if any are improperly formatted.
-    tuf.formats.SIGNATURE_SCHEMA.check_match(signature)
-
-    roleinfo = tuf.roledb.get_roleinfo(self.rolename)
-
-    # Ensure the roleinfo contains a 'signatures' field.
-    if 'signatures' not in roleinfo:
-      roleinfo['signatures'] = []
-
-    # Update the role's roleinfo by adding 'signature', if it has not been
-    # added.
-    if signature not in roleinfo['signatures']:
-      roleinfo['signatures'].append(signature)
-      tuf.roledb.update_roleinfo(self.rolename, roleinfo)
-
-
-
-  def remove_signature(self, signature):
-    """
-
-      Remove a previously loaded, or added, role 'signature'. A role must
-      contain a threshold number of signatures to be considered fully signed.
-
-    >>>
-    >>>
-    >>>
-
-
-      signature:
-        The role signature to remove, conformant to
-        'tuf.formats.SIGNATURE_SCHEMA'.
-
-
-      tuf.FormatError, if the 'signature' argument is improperly formatted.
-
-      tuf.Error, if 'signature' has not been previously added to this role.
-
-
-      Updates the 'signatures' field of the role in 'tuf.roledb.py'.
-
-
-      None.
-    """
-
-    # Does 'signature' have the correct format?
-    # Ensure the arguments have the appropriate number of objects and object
-    # types, and that all dict keys are properly named.
-    # Raise 'tuf.FormatError' if any are improperly formatted.
-    tuf.formats.SIGNATURE_SCHEMA.check_match(signature)
-
-    roleinfo = tuf.roledb.get_roleinfo(self.rolename)
-
-    if signature in roleinfo['signatures']:
-      roleinfo['signatures'].remove(signature)
-
-      tuf.roledb.update_roleinfo(self.rolename, roleinfo)
-
-    else:
-      raise tuf.Error('Signature not found.')
-
-
-
-  @property
-  def signatures(self):
-    """
-
-      A getter method that returns the role's signatures. A role is considered
-      fully signed if it contains a threshold number of signatures, where each
-      signature is generated by the private key corresponding to one of the
-      role's keys. Keys are added to a role with the add_verification_key()
-      method.
-
-      None.
-
-      None.
-
-      None.
-
-      A list of signatures, conformant to 'tuf.formats.SIGNATURES_SCHEMA'.
- """ - - roleinfo = tuf.roledb.get_roleinfo(self.rolename) - signatures = roleinfo['signatures'] - - return signatures - - - - @property - def keys(self): - """ - - A getter method that returns the role's keyids of the keys. The role - is expected to eventually contain a threshold of signatures generated - by the private keys of each of the role's keys (returned here as a keyid.) - - - None. - - - None. - - - None. - - - A list of the role's keyids (i.e., keyids of the keys). - """ - - roleinfo = tuf.roledb.get_roleinfo(self.rolename) - keyids = roleinfo['keyids'] - - return keyids - - - - @property - def rolename(self): - """ - - Return the role's name. - Examples: 'root', 'timestamp', 'targets/unclaimed/django'. - - - None. - - - None. - - - None. - - - The role's name, conformant to 'tuf.formats.ROLENAME_SCHEMA'. - Examples: 'root', 'timestamp', 'targets/unclaimed/django'. - """ - - return self._rolename - - - - @property - def version(self): - """ - - A getter method that returns the role's version number, conformant to - 'tuf.formats.VERSION_SCHEMA'. - - - None. - - - None. - - - None. - - - The role's version number, conformant to 'tuf.formats.VERSION_SCHEMA'. - """ - - roleinfo = tuf.roledb.get_roleinfo(self.rolename) - version = roleinfo['version'] - - return version - - - - @version.setter - def version(self, version): - """ - - A setter method that updates the role's version number. TUF clients - download new metadata with version number greater than the version - currently trusted. New metadata start at version 1 when either write() - or write_partial() is called. Version numbers are automatically - incremented, when the write methods are called, as follows: - - 1. write_partial==True and the metadata is the first to be written. - - 2. write_partial=False (i.e., write()), the metadata was not loaded as - partially written, and a write_partial is not needed. - - >>> - >>> - >>> - - - version: - The role's version number, conformant to 'tuf.formats.VERSION_SCHEMA'. - - - tuf.FormatError, if the 'version' argument is improperly formatted. - - - Modifies the 'version' attribute of the Repository object and updates - the role's version in 'tuf.roledb.py'. - - - None. - """ - - # Does 'version' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'tuf.FormatError' if any are improperly formatted. - tuf.formats.METADATAVERSION_SCHEMA.check_match(version) - - roleinfo = tuf.roledb.get_roleinfo(self.rolename) - roleinfo['version'] = version - - tuf.roledb.update_roleinfo(self._rolename, roleinfo) - - - - @property - def threshold(self): - """ - - Return the role's threshold value. A role is considered fully signed if - a threshold number of signatures is available. - - - None. - - - None. - - - None. - - - The role's threshold value, conformant to 'tuf.formats.THRESHOLD_SCHEMA'. - """ - - roleinfo = tuf.roledb.get_roleinfo(self._rolename) - threshold = roleinfo['threshold'] - - return threshold - - - - @threshold.setter - def threshold(self, threshold): - """ - - A setter method that modified the threshold value of the role. Metadata - is considered fully signed if a 'threshold' number of signatures is - available. - - >>> - >>> - >>> - - - threshold: - An integer value that sets the role's threshold value, or the miminum - number of signatures needed for metadata to be considered fully - signed. Conformant to 'tuf.formats.THRESHOLD_SCHEMA'. 
- - - tuf.FormatError, if the 'threshold' argument is improperly formatted. - - - Modifies the threshold attribute of the Repository object and updates - the roles threshold in 'tuf.roledb.py'. - - - None. - """ - - # Does 'threshold' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'tuf.FormatError' if any are improperly formatted. - tuf.formats.THRESHOLD_SCHEMA.check_match(threshold) - - roleinfo = tuf.roledb.get_roleinfo(self._rolename) - roleinfo['threshold'] = threshold - - tuf.roledb.update_roleinfo(self._rolename, roleinfo) - - - @property - def expiration(self): - """ - - A getter method that returns the role's expiration datetime. - - >>> - >>> - >>> - - - None. - - - None. - - - None. - - - The role's expiration datetime, a datetime.datetime() object. - """ - - roleinfo = tuf.roledb.get_roleinfo(self.rolename) - expires = roleinfo['expires'] - - expires_datetime_object = iso8601.parse_date(expires) - - return expires_datetime_object - - - - @expiration.setter - def expiration(self, datetime_object): - """ - - A setter method for the role's expiration datetime. The top-level - roles have a default expiration (e.g., ROOT_EXPIRATION), but may later - be modified by this setter method. - - >>> - >>> - >>> - - - datetime_object: - The datetime expiration of the role, a datetime.datetime() object. - - - tuf.FormatError, if 'datetime_object' is not a datetime.datetime() object. - - tuf.Error, if 'datetime_object' has already expired. - - - Modifies the expiration attribute of the Repository object. - The datetime given will be truncated to microseconds = 0 - - - None. - """ - - # Is 'datetime_object' a datetime.datetime() object? - # Raise 'tuf.FormatError' if not. - if not isinstance(datetime_object, datetime.datetime): - raise tuf.FormatError(repr(datetime_object) + ' is not a' - ' datetime.datetime() object.') - - # truncate the microseconds value to produce a correct schema string - # of the form yyyy-mm-ddThh:mm:ssZ - datetime_object = datetime_object.replace(microsecond = 0) - - # Ensure the expiration has not already passed. - current_datetime_object = \ - tuf.formats.unix_timestamp_to_datetime(int(time.time())) - - if datetime_object < current_datetime_object: - raise tuf.Error(repr(self.rolename) + ' has already expired.') - - # Update the role's 'expires' entry in 'tuf.roledb.py'. - roleinfo = tuf.roledb.get_roleinfo(self.rolename) - expires = datetime_object.isoformat() + 'Z' - roleinfo['expires'] = expires - - tuf.roledb.update_roleinfo(self.rolename, roleinfo) - - - - @property - def signing_keys(self): - """ - - A getter method that returns a list of the role's signing keys. - - >>> - >>> - >>> - - - None. - - - None. - - - None. - - - A list of keyids of the role's signing keys, conformant to - 'tuf.formats.KEYIDS_SCHEMA'. - """ - - roleinfo = tuf.roledb.get_roleinfo(self.rolename) - signing_keyids = roleinfo['signing_keyids'] - - return signing_keyids - - - - @property - def compressions(self): - """ - - A getter method that returns a list of the file compression algorithms - used when the metadata is written to disk. If ['gz'] is set for the - 'targets.json' role, the metadata files 'targets.json' and - 'targets.json.gz' are written. - - >>> - >>> - >>> - - - None. - - - None. - - - None. - - - A list of compression algorithms, conformant to - 'tuf.formats.COMPRESSIONS_SCHEMA'. 
- """ - - roleinfo = tuf.roledb.get_roleinfo(self.rolename) - compressions = roleinfo['compressions'] - - return compressions - - - - @compressions.setter - def compressions(self, compression_list): - """ - - A setter method for the file compression algorithms used when the - metadata is written to disk. If ['gz'] is set for the 'targets.json' role - the metadata files 'targets.json' and 'targets.json.gz' are written. - - >>> - >>> - >>> - - - compression_list: - A list of file compression algorithms, conformant to - 'tuf.formats.COMPRESSIONS_SCHEMA'. - - - tuf.FormatError, if 'compression_list' is improperly formatted. - - - Updates the role's compression algorithms listed in 'tuf.roledb.py'. - - - None. - """ - - # Does 'compression_name' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'tuf.FormatError' if any are improperly formatted. - tuf.formats.COMPRESSIONS_SCHEMA.check_match(compression_list) - - roleinfo = tuf.roledb.get_roleinfo(self.rolename) - - # Add the compression algorithms of 'compression_list' to the role's - # entry in 'tuf.roledb.py'. - for compression in compression_list: - if compression not in roleinfo['compressions']: - roleinfo['compressions'].append(compression) - - tuf.roledb.update_roleinfo(self.rolename, roleinfo) - - - - - -class Root(Metadata): - """ - - Represent a Root role object. The root role is responsible for - listing the public keys and threshold of all the top-level roles, including - itself. Top-level metadata is rejected if it does not comply with what is - specified by the Root role. - - This Root object sub-classes Metadata, so the expected Metadata - operations like adding/removing public keys, signatures, private keys, and - updating metadata attributes (e.g., version and expiration) is supported. - Since Root is a top-level role and must exist, a default Root object - is instantiated when a new Repository object is created. - - >>> - >>> - >>> - - - None. - - - None. - - - A 'root' role is added to 'tuf.roledb.py'. - - - None. - """ - - def __init__(self): - - super(Root, self).__init__() - - self._rolename = 'root' - - # By default, 'snapshot' metadata is set to expire 1 week from the current - # time. The expiration may be modified. - expiration = \ - tuf.formats.unix_timestamp_to_datetime(int(time.time() + ROOT_EXPIRATION)) - expiration = expiration.isoformat() + 'Z' - - roleinfo = {'keyids': [], 'signing_keyids': [], 'threshold': 1, - 'signatures': [], 'version': 0, 'consistent_snapshot': False, - 'compressions': [''], 'expires': expiration, - 'partial_loaded': False} - try: - tuf.roledb.add_role(self._rolename, roleinfo) - - except tuf.RoleAlreadyExistsError: - pass - - - - - -class Timestamp(Metadata): - """ - - Represent a Timestamp role object. The timestamp role is responsible for - referencing the latest version of the Snapshot role. Under normal - conditions, it is the only role to be downloaded from a remote repository - without a known file length and hash. An upper length limit is set, though. - Also, its signatures are also verified to be valid according to the Root - role. If invalid metadata can only be downloaded by the client, Root - is the only other role that is downloaded without a known length and hash. - This case may occur if a role's signing keys have been revoked and a newer - Root file is needed to list the updated keys. 
-
-    This Timestamp object sub-classes Metadata, so the expected Metadata
-    operations like adding/removing public keys, signatures, private keys, and
-    updating metadata attributes (e.g., version and expiration) are supported.
-    Since Timestamp is a top-level role and must exist, a default Timestamp
-    object is instantiated when a new Repository object is created.
-
-  >>>
-  >>>
-  >>>
-
-
-    None.
-
-
-    None.
-
-
-    A 'timestamp' role is added to 'tuf.roledb.py'.
-
-
-    None.
-  """
-
-  def __init__(self):
-
-    super(Timestamp, self).__init__()
-
-    self._rolename = 'timestamp'
-
-    # By default, 'timestamp' metadata is set to expire 1 day from the current
-    # time. The expiration may be modified.
-    expiration = \
-      tuf.formats.unix_timestamp_to_datetime(int(time.time() + TIMESTAMP_EXPIRATION))
-    expiration = expiration.isoformat() + 'Z'
-
-    roleinfo = {'keyids': [], 'signing_keyids': [], 'threshold': 1,
-                'signatures': [], 'version': 0, 'compressions': [''],
-                'expires': expiration, 'partial_loaded': False}
-
-    try:
-      tuf.roledb.add_role(self.rolename, roleinfo)
-
-    except tuf.RoleAlreadyExistsError:
-      pass
-
-
-
-
-
-class Snapshot(Metadata):
-  """
-
-    Represent a Snapshot role object. The snapshot role is responsible for
-    referencing the other top-level roles (excluding Timestamp) and all
-    delegated roles.
-
-    This Snapshot object sub-classes Metadata, so the expected
-    Metadata operations like adding/removing public keys, signatures, private
-    keys, and updating metadata attributes (e.g., version and expiration) are
-    supported. Since Snapshot is a top-level role and must exist, a default
-    Snapshot object is instantiated when a new Repository object is created.
-
-  >>>
-  >>>
-  >>>
-
-
-    None.
-
-
-    None.
-
-
-    A 'snapshot' role is added to 'tuf.roledb.py'.
-
-
-    None.
-  """
-
-  def __init__(self):
-
-    super(Snapshot, self).__init__()
-
-    self._rolename = 'snapshot'
-
-    # By default, 'snapshot' metadata is set to expire 1 week from the current
-    # time. The expiration may be modified.
-    expiration = \
-      tuf.formats.unix_timestamp_to_datetime(int(time.time() + SNAPSHOT_EXPIRATION))
-    expiration = expiration.isoformat() + 'Z'
-
-    roleinfo = {'keyids': [], 'signing_keyids': [], 'threshold': 1,
-                'signatures': [], 'version': 0, 'compressions': [''],
-                'expires': expiration, 'partial_loaded': False}
-
-    try:
-      tuf.roledb.add_role(self._rolename, roleinfo)
-
-    except tuf.RoleAlreadyExistsError:
-      pass
-
-
-
-
-
-class Targets(Metadata):
-  """
-
-    Represent a Targets role object. Targets roles include the top-level role
-    'targets.json' and all delegated roles (e.g., 'targets/unclaimed/django').
-    The expected operations of Targets metadata are included, such as adding
-    and removing repository target files, making and revoking delegations, and
-    listing the target files provided by it.
-
-    Adding or removing a delegation causes the attributes of the Targets object
-    to be updated. That is, if the 'django' Targets object is delegated by
-    'targets/unclaimed', a new attribute is added so that the following
-    code statement is supported:
-    repository.targets('unclaimed')('django').version = 2
-
-    Likewise, revoking a delegation causes removal of the delegation attribute.
-
-    This Targets object sub-classes Metadata, so the expected
-    Metadata operations like adding/removing public keys, signatures, private
-    keys, and updating metadata attributes (e.g., version and expiration) are
-    supported.
Since Targets is a top-level role and must exist, a default - Targets object (for 'targets.json', not delegated roles) is instantiated when - a new Repository object is created. - - >>> - >>> - >>> - - - targets_directory: - The targets directory of the Repository object. - - rolename: - The rolename of this Targets object. - - roleinfo: - An already populated roleinfo object of 'rolename'. Conformant to - 'tuf.formats.ROLEDB_SCHEMA'. - - - tuf.FormatError, if the arguments are improperly formatted. - - - Modifies the roleinfo of the targets role in 'tuf.roledb', or creates - a default one named 'targets'. - - - None. - """ - - def __init__(self, targets_directory, rolename='targets', roleinfo=None, - parent_targets_object=None): - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'tuf.FormatError' if any are improperly formatted. - tuf.formats.PATH_SCHEMA.check_match(targets_directory) - tuf.formats.ROLENAME_SCHEMA.check_match(rolename) - - if roleinfo is not None: - tuf.formats.ROLEDB_SCHEMA.check_match(roleinfo) - - super(Targets, self).__init__() - self._targets_directory = targets_directory - self._rolename = rolename - self._target_files = [] - self._delegated_roles = {} - self._parent_targets_object = self - - # Keep a reference to the top-level 'targets' object. Any delegated roles - # that may be created, can be added to and accessed via the top-level - # 'targets' object. - if parent_targets_object is not None: - self._parent_targets_object = parent_targets_object - - # By default, Targets objects are set to expire 3 months from the current - # time. May be later modified. - expiration = \ - tuf.formats.unix_timestamp_to_datetime(int(time.time() + TARGETS_EXPIRATION)) - expiration = expiration.isoformat() + 'Z' - - # If 'roleinfo' is not provided, set an initial default. - if roleinfo is None: - roleinfo = {'keyids': [], 'signing_keyids': [], 'threshold': 1, - 'version': 0, 'compressions': [''], 'expires': expiration, - 'signatures': [], 'paths': {}, 'path_hash_prefixes': [], - 'partial_loaded': False, 'delegations': {'keys': {}, - 'roles': []}} - - # Add the new role to the 'tuf.roledb'. - try: - tuf.roledb.add_role(self.rolename, roleinfo) - - except tuf.RoleAlreadyExistsError: - pass - - - - def __call__(self, rolename): - """ - - Allow callable Targets object so that delegated roles may be referenced - by their string rolenames. Rolenames may include characters like '-' and - are not restricted to Python identifiers. - - - rolename: - The rolename of the delegated role. 'rolename' must be a role - previously delegated by this Targets role. - - - tuf.FormatError, if the arguments are improperly formatted. - - tuf.UnknownRoleError, if 'rolename' has not been delegated by this - Targets object. - - - Modifies the roleinfo of the targets role in 'tuf.roledb'. - - - The Targets object of 'rolename'. - """ - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'tuf.FormatError' if any are improperly formatted. 
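-    #
-    # Illustrative usage of the callable interface (the rolename 'django' is
-    # hypothetical and must already have been delegated by this role):
-    #
-    #   django_targets = repository.targets('django')
-    #   django_targets.version = 2
-    #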
- tuf.formats.ROLENAME_SCHEMA.check_match(rolename) - - if rolename in self._delegated_roles: - return self._delegated_roles[rolename] - - else: - raise tuf.UnknownRoleError(repr(rolename) + ' has not been delegated' - ' by ' + repr(self.rolename)) - - - - - - def add_delegated_role(self, rolename, targets_object): - """ - - Add 'targets_object' to this Targets object's list of known delegated - roles. Specifically, delegated Targets roles should call 'super(Targets, - self).add_delegated_role(...)' so that the top-level 'targets' role - contains a dictionary of all the available roles on the repository. - - - rolename: - The rolename of the delegated role. 'rolename' must be a role - previously delegated by this Targets role. - - targets_object: - A Targets() object. - - - tuf.FormatError, if the arguments are improperly formatted. - - tuf.RoleAlreadyExistsError, if 'rolename' has already been delegated by - this Targets object. - - - Updates the Target object's dictionary of delegated targets. - - - The Targets object of 'rolename'. - """ - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'tuf.FormatError' if any are improperly formatted. - tuf.formats.ROLENAME_SCHEMA.check_match(rolename) - - if not isinstance(targets_object, Targets): - raise tuf.FormatError(repr(targets_object) + ' is not a Targets object.') - - - if rolename in self._delegated_roles: - raise tuf.RoleAlreadyExistsError(repr(rolename) + ' already exists.') - - else: - self._delegated_roles[rolename] = targets_object - - - - - - @property - def target_files(self): - """ - - A getter method that returns the target files added thus far to this - Targets object. - - >>> - >>> - >>> - - - None. - - - None. - - - None. - - - None. - """ - - target_files = tuf.roledb.get_roleinfo(self._rolename)['paths'] - - return target_files - - - - def add_restricted_paths(self, list_of_directory_paths, child_rolename): - """ - - Add 'list_of_directory_paths' to the restricted paths of 'child_rolename'. - The updater client verifies the target paths specified by child roles, and - searches for targets by visiting these restricted paths. A child role may - only provide targets specifically listed in the delegations field of the - parent, or a target that falls under a restricted path. - - >>> - >>> - >>> - - - list_of_directory_paths: - A list of directory paths 'child_rolename' should also be restricted to. - - child_rolename: - The child delegation that requires an update to its restricted paths, - as listed in the parent role's delegations (e.g., 'Django' in - 'unclaimed'). - - - tuf.Error, if a directory path in 'list_of_directory_paths' is not a - directory, or not under the repository's targets directory. If - 'child_rolename' has not been delegated yet. - - - Modifies this Targets' delegations field. - - - None. - """ - - # Does 'filepath' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'tuf.FormatError' if there is a mismatch. - tuf.formats.PATHS_SCHEMA.check_match(list_of_directory_paths) - tuf.formats.ROLENAME_SCHEMA.check_match(child_rolename) - - # A list of verified paths to be added to the child role's entry in the - # parent's delegations. 
- directory_paths = [] - - # Ensure the 'child_rolename' has been delegated, otherwise it will not - # have an entry in the parent role's delegations field. - if not tuf.roledb.role_exists(child_rolename): - raise tuf.Error(repr(child_rolename) + ' has not been delegated.') - - # Are the paths in 'list_of_directory_paths' valid? - for directory_path in list_of_directory_paths: - directory_path = os.path.abspath(directory_path) - if not os.path.isdir(directory_path): - raise tuf.Error(repr(directory_path) + ' is not a directory.') - - # Are the paths in the repository's targets directory? Append a trailing - # path separator with os.path.join(path, ''). - targets_directory = os.path.join(self._targets_directory, '') - directory_path = os.path.join(directory_path, '') - if not directory_path.startswith(targets_directory): - raise tuf.Error(repr(directory_path) + ' is not under the' - ' Repository\'s targets directory: ' + repr(self._targets_directory)) - - directory_paths.append(directory_path[len(self._targets_directory):]) - - # Get the current role's roleinfo, so that its delegations field can be - # updated. - roleinfo = tuf.roledb.get_roleinfo(self._rolename) - - # Update the restricted paths of 'child_rolename'. - for role in roleinfo['delegations']['roles']: - if role['name'] == child_rolename: - restricted_paths = role['paths'] - - for directory_path in directory_paths: - if directory_path not in restricted_paths: - restricted_paths.append(directory_path) - - tuf.roledb.update_roleinfo(self._rolename, roleinfo) - - - - def add_target(self, filepath, custom=None): - """ - - Add a filepath (must be under the repository's targets directory) to the - Targets object. - - This method does not actually create 'filepath' on the file system. - 'filepath' must already exist on the file system. - - >>> - >>> - >>> - - - filepath: - The path of the target file. It must be located in the repository's - targets directory. - - custom: - An optional object providing additional information about the file. - - - tuf.FormatError, if 'filepath' is improperly formatted. - - tuf.Error, if 'filepath' is not found under the repository's targets - directory. - - - Adds 'filepath' to this role's list of targets. This role's - 'tuf.roledb.py' is also updated. - - - None. - """ - - # Does 'filepath' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'tuf.FormatError' if there is a mismatch. - tuf.formats.PATH_SCHEMA.check_match(filepath) - if custom is None: - custom = {} - - else: - tuf.formats.CUSTOM_SCHEMA.check_match(custom) - - filepath = os.path.abspath(filepath) - - # Ensure 'filepath' is found under the repository's targets directory. - if not filepath.startswith(self._targets_directory): - raise tuf.Error(repr(filepath) + ' is not under the Repository\'s' - ' targets directory: ' + repr(self._targets_directory)) - - # Add 'filepath' (i.e., relative to the targets directory) to the role's - # list of targets. 'filepath' will be verified as an allowed path according - # to this Targets parent role when write() is called. Not verifying - # 'filepath' here allows freedom to add targets and parent restrictions - # in any order, and minimize the number of times these checks are performed. - if os.path.isfile(filepath): - - # Update the role's 'tuf.roledb.py' entry and avoid duplicates. 
-    targets_directory_length = len(self._targets_directory)
-    roleinfo = tuf.roledb.get_roleinfo(self._rolename)
-    relative_path = filepath[targets_directory_length:]
-    if relative_path not in roleinfo['paths']:
-      roleinfo['paths'].update({relative_path: custom})
-    tuf.roledb.update_roleinfo(self._rolename, roleinfo)
-
-    else:
-      raise tuf.Error(repr(filepath) + ' is not a valid file.')
-
-
-
-  def add_targets(self, list_of_targets):
-    """
-
-      Add a list of target filepaths (all relative to 'self.targets_directory').
-      This method does not actually create files on the file system. The
-      list of targets must already exist.
-
-    >>>
-    >>>
-    >>>
-
-
-      list_of_targets:
-        A list of target filepaths that are added to the paths of this Targets
-        object.
-
-
-      tuf.FormatError, if the arguments are improperly formatted.
-
-      tuf.Error, if any of the paths listed in 'list_of_targets' is not found
-      under the repository's targets directory or is invalid.
-
-
-      This Targets' roleinfo is updated with the paths in 'list_of_targets'.
-
-
-      None.
-    """
-
-    # Does 'list_of_targets' have the correct format?
-    # Ensure the arguments have the appropriate number of objects and object
-    # types, and that all dict keys are properly named.
-    # Raise 'tuf.FormatError' if there is a mismatch.
-    tuf.formats.RELPATHS_SCHEMA.check_match(list_of_targets)
-
-    # Update the tuf.roledb entry.
-    targets_directory_length = len(self._targets_directory)
-    relative_list_of_targets = []
-
-    # Ensure the paths in 'list_of_targets' are valid and fall under the
-    # repository's targets directory. The paths of 'list_of_targets' will be
-    # verified as allowed paths according to this Targets parent role when
-    # write() is called. Not verifying filepaths here allows the freedom to add
-    # targets and parent restrictions in any order, and minimize the number of
-    # times these checks are performed.
-    for target in list_of_targets:
-      filepath = os.path.abspath(target)
-
-      if not filepath.startswith(self._targets_directory+os.sep):
-        raise tuf.Error(repr(filepath) + ' is not under the Repository\'s'
-          ' targets directory: ' + repr(self._targets_directory))
-
-      if os.path.isfile(filepath):
-        relative_list_of_targets.append(filepath[targets_directory_length:])
-
-      else:
-        raise tuf.Error(repr(filepath) + ' is not a valid file.')
-
-    # Update this Targets 'tuf.roledb.py' entry.
-    roleinfo = tuf.roledb.get_roleinfo(self._rolename)
-    for relative_target in relative_list_of_targets:
-      if relative_target not in roleinfo['paths']:
-        roleinfo['paths'].update({relative_target: {}})
-
-      else:
-        continue
-
-    tuf.roledb.update_roleinfo(self.rolename, roleinfo)
-
-
-
-  def remove_target(self, filepath):
-    """
-
-      Remove the target 'filepath' from this Targets' 'paths' field. 'filepath'
-      is relative to the targets directory.
-
-    >>>
-    >>>
-    >>>
-
-
-      filepath:
-        The target to remove from this Targets object, relative to the
-        repository's targets directory.
-
-
-      tuf.FormatError, if 'filepath' is improperly formatted.
-
-      tuf.Error, if 'filepath' is not under the repository's targets directory,
-      or not found.
-
-
-      Modifies this Targets 'tuf.roledb.py' entry.
-
-
-      None.
-    """
-
-    # Does 'filepath' have the correct format?
-    # Ensure the arguments have the appropriate number of objects and object
-    # types, and that all dict keys are properly named.
-    # Raise 'tuf.FormatError' if there is a mismatch.
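-    #
-    # Illustrative usage (the filepath is hypothetical and must point under
-    # the repository's targets directory):
-    #
-    #   repository.targets.remove_target('/path/to/repository/targets/file1.txt')
-    #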
-    tuf.formats.RELPATH_SCHEMA.check_match(filepath)
-
-    filepath = os.path.abspath(filepath)
-    targets_directory_length = len(self._targets_directory)
-
-    # Ensure 'filepath' is under the repository targets directory.
-    if not filepath.startswith(self._targets_directory+os.sep):
-      raise tuf.Error(repr(filepath) + ' is not under the Repository\'s'
-        ' targets directory: ' + repr(self._targets_directory))
-
-    # The relative filepath is listed in 'paths'.
-    relative_filepath = filepath[targets_directory_length:]
-
-    # Remove 'relative_filepath', if found, and update this Targets roleinfo.
-    fileinfo = tuf.roledb.get_roleinfo(self.rolename)
-    if relative_filepath in fileinfo['paths']:
-      del fileinfo['paths'][relative_filepath]
-      tuf.roledb.update_roleinfo(self.rolename, fileinfo)
-
-    else:
-      raise tuf.Error('Target file path not found.')
-
-
-
-  def clear_targets(self):
-    """
-
-      Remove all the target filepaths in the "paths" field of this Targets.
-
-    >>>
-    >>>
-    >>>
-
-
-      None.
-
-
-      None.
-
-
-      Modifies this Targets' 'tuf.roledb.py' entry.
-
-
-      None.
-    """
-
-    roleinfo = tuf.roledb.get_roleinfo(self.rolename)
-    roleinfo['paths'] = {}
-
-    tuf.roledb.update_roleinfo(self.rolename, roleinfo)
-
-
-
-
-
-  def get_delegated_rolenames(self):
-    """
-
-      Return all delegations of a role. If ['a/b/', 'a/b/c/', 'a/b/c/d'] have
-      been delegated by the delegated role 'django',
-      repository.targets('django').get_delegated_rolenames() returns: ['a/b',
-      'a/b/c', 'a/b/c/d'].
-
-      None.
-
-      None.
-
-      None.
-
-      A list of rolenames.
-    """
-
-    return tuf.roledb.get_delegated_rolenames(self.rolename)
-
-
-
-
-
-  def delegate(self, rolename, public_keys, list_of_targets, threshold=1,
-               backtrack=True, restricted_paths=None, path_hash_prefixes=None):
-    """
-
-      Create a new delegation, where 'rolename' is a child delegation of this
-      Targets object. The keys and roles database is updated, including the
-      delegations field of this Targets. The delegation of 'rolename' is added
-      and accessible (i.e., repository.targets(rolename)).
-
-      Actual metadata files are not created until repository.write() or
-      repository.write_partial() is called.
-
-    >>>
-    >>>
-    >>>
-
-
-      rolename:
-        The name of the delegated role, as in 'django' or 'unclaimed'.
-
-      public_keys:
-        A list of TUF key objects in 'ANYKEYLIST_SCHEMA' format. The list
-        may contain any of the supported key types: RSAKEY_SCHEMA,
-        ED25519KEY_SCHEMA, etc.
-
-      list_of_targets:
-        A list of target filepaths that are added to the paths of 'rolename'.
-        'list_of_targets' is a list of target filepaths, and can be empty.
-
-      threshold:
-        The threshold number of keys of 'rolename'.
-
-      backtrack:
-        Boolean that indicates whether this role allows the updater client
-        to continue searching for targets (target files it is trusted to list
-        but has not yet specified) in other delegations. If 'backtrack' is
-        False and 'updater.target()' does not find 'example_target.tar.gz' in
-        this role, a 'tuf.UnknownTargetError' exception should be raised. If
-        'backtrack' is True (default), and 'target/other_role' is also trusted
-        with 'example_target.tar.gz' and has listed it, updater.target()
-        should backtrack and return the target file specified by
-        'target/other_role'.
-
-      restricted_paths:
-        A list of restricted directory or file paths of 'rolename'. Any target
-        files added to 'rolename' must fall under one of the restricted paths.
-
-      path_hash_prefixes:
-        A list of hash prefixes in 'tuf.formats.PATH_HASH_PREFIXES_SCHEMA'
-        format, used in hashed bin delegations.
-        Targets may be located and stored in hashed bins by calculating the
-        target path's hash prefix.
-
-
-      tuf.FormatError, if any of the arguments are improperly formatted.
-
-      tuf.Error, if the delegated role already exists or if any of the arguments
-      is an invalid path (i.e., not under the repository's targets directory).
-
-
-      A new Targets object is created for 'rolename' that is accessible to the
-      caller (i.e., targets.<rolename>). The 'tuf.keydb.py' and
-      'tuf.roledb.py' stores are updated with 'public_keys'.
-
-
-      None.
-    """
-
-    # Do the arguments have the correct format?
-    # Ensure the arguments have the appropriate number of objects and object
-    # types, and that all dict keys are properly named.
-    # Raise 'tuf.FormatError' if there is a mismatch.
-    tuf.formats.ROLENAME_SCHEMA.check_match(rolename)
-    tuf.formats.ANYKEYLIST_SCHEMA.check_match(public_keys)
-    tuf.formats.RELPATHS_SCHEMA.check_match(list_of_targets)
-    tuf.formats.THRESHOLD_SCHEMA.check_match(threshold)
-    tuf.formats.BOOLEAN_SCHEMA.check_match(backtrack)
-
-    if restricted_paths is not None:
-      tuf.formats.RELPATHS_SCHEMA.check_match(restricted_paths)
-
-    if path_hash_prefixes is not None:
-      tuf.formats.PATH_HASH_PREFIXES_SCHEMA.check_match(path_hash_prefixes)
-
-    # Ensure 'rolename' is not already a delegation.
-    if tuf.roledb.role_exists(rolename):
-      raise tuf.Error(repr(rolename) + ' already delegated.')
-
-    # Keep track of the valid keyids (added to the new Targets object) and
-    # their keydicts (added to this Targets delegations).
-    keyids = []
-    keydict = {}
-
-    # Add all the keys of 'public_keys' to tuf.keydb.
-    for key in public_keys:
-      keyid = key['keyid']
-      key_metadata_format = tuf.keys.format_keyval_to_metadata(key['keytype'],
-                                                               key['keyval'])
-      # Update 'keyids' and 'keydict'.
-      new_keydict = {keyid: key_metadata_format}
-      keydict.update(new_keydict)
-      keyids.append(keyid)
-
-    # Ensure the paths of 'list_of_targets' all fall under the repository's
-    # targets.
-    relative_targetpaths = {}
-    targets_directory_length = len(self._targets_directory)
-
-    for target in list_of_targets:
-      target = os.path.abspath(target)
-      if not target.startswith(self._targets_directory+os.sep):
-        raise tuf.Error(repr(target) + ' is not under the Repository\'s'
-          ' targets directory: ' + repr(self._targets_directory))
-
-      relative_targetpaths.update({target[targets_directory_length:]: {}})
-
-    # Ensure the paths of 'restricted_paths' all fall under the repository's
-    # targets.
-    relative_restricted_paths = []
-
-    if restricted_paths is not None:
-      for path in restricted_paths:
-        path = os.path.abspath(path) + os.sep
-        if not path.startswith(self._targets_directory + os.sep):
-          raise tuf.Error(repr(path) + ' is not under the Repository\'s'
-            ' targets directory: ' + repr(self._targets_directory))
-
-        # Append a trailing path separator with os.path.join(path, '').
-        path = os.path.join(path, '')
-        relative_restricted_paths.append(path[targets_directory_length:])
-
-    # Create a new Targets object for the 'rolename' delegation. An initial
-    # expiration is set (3 months from the current time).
-    expiration = \
-      tuf.formats.unix_timestamp_to_datetime(int(time.time() + TARGETS_EXPIRATION))
-    expiration = expiration.isoformat() + 'Z'
-
-    roleinfo = {'name': rolename, 'keyids': keyids, 'signing_keyids': [],
-                'threshold': threshold, 'version': 0, 'compressions': [''],
-                'expires': expiration, 'signatures': [], 'partial_loaded': False,
-                'paths': relative_targetpaths, 'delegations': {'keys': {},
-                'roles': []}}
-
-    # The new targets object is added as an attribute to this Targets object.
-    new_targets_object = Targets(self._targets_directory, rolename,
-                                 roleinfo, parent_targets_object=self)
-
-    # Update the 'delegations' field of the current role.
-    current_roleinfo = tuf.roledb.get_roleinfo(self.rolename)
-    current_roleinfo['delegations']['keys'].update(keydict)
-
-    # Update the roleinfo of this role. A ROLE_SCHEMA object requires only
-    # 'keyids', 'threshold', and 'paths'.
-    roleinfo = {'name': rolename,
-                'keyids': roleinfo['keyids'],
-                'threshold': roleinfo['threshold'],
-                'backtrack': backtrack,
-                'paths': list(roleinfo['paths'].keys())}
-
-    if restricted_paths is not None:
-      roleinfo['paths'] = relative_restricted_paths
-
-    if path_hash_prefixes is not None:
-      roleinfo['path_hash_prefixes'] = path_hash_prefixes
-      # A role in 'delegations' must list either 'path_hash_prefixes'
-      # or 'paths'.
-      del roleinfo['paths']
-
-    current_roleinfo['delegations']['roles'].append(roleinfo)
-    tuf.roledb.update_roleinfo(self.rolename, current_roleinfo)
-
-    # Update the public keys of 'new_targets_object'.
-    for key in public_keys:
-      new_targets_object.add_verification_key(key)
-
-    # Add the new delegation to the top-level 'targets' role object (i.e.,
-    # 'repository.targets()'). For example, 'django', which was delegated by
-    # repository.targets('claimed'), is added to 'repository.targets('django')'.
-
-    # Add 'new_targets_object' to the 'targets' role object (this object).
-    if self.rolename == 'targets':
-      self.add_delegated_role(rolename, new_targets_object)
-
-    else:
-      self._parent_targets_object.add_delegated_role(rolename, new_targets_object)
-
-
-
-
-
-  def revoke(self, rolename):
-    """
-
-      Revoke this Targets' 'rolename' delegation. Its 'rolename' attribute is
-      deleted, including the entries in its 'delegations' field and in
-      'tuf.roledb'.
-
-      Actual metadata files are not updated until repository.write() or
-      repository.write_partial() is called.
-
-    >>>
-    >>>
-    >>>
-
-
-      rolename:
-        The rolename (e.g., 'django') of the child delegation the
-        parent role (this role) wants to revoke.
-
-
-      tuf.FormatError, if 'rolename' is improperly formatted.
-
-
-      The delegations dictionary of 'rolename' is modified, and its 'tuf.roledb'
-      entry is updated. This Targets' 'rolename' delegation attribute is also
-      deleted.
-
-
-      None.
-    """
-
-    # Does 'rolename' have the correct format?
-    # Ensure the arguments have the appropriate number of objects and object
-    # types, and that all dict keys are properly named.
-    # Raise 'tuf.FormatError' if there is a mismatch.
-    tuf.formats.ROLENAME_SCHEMA.check_match(rolename)
-
-    # Remove 'rolename' from this Target's delegations dict.
-    roleinfo = tuf.roledb.get_roleinfo(self.rolename)
-
-    for role in roleinfo['delegations']['roles']:
-      if role['name'] == rolename:
-        roleinfo['delegations']['roles'].remove(role)
-
-    tuf.roledb.update_roleinfo(self.rolename, roleinfo)
-
-    # Remove 'rolename' from 'tuf.roledb.py'.
-    tuf.roledb.remove_role(rolename)
-
-    # Remove the rolename delegation from the current role.
For example, the - # 'django' role is removed from repository.targets('django'). - del self._delegated_roles[rolename] - - - - def delegate_hashed_bins(self, list_of_targets, keys_of_hashed_bins, - number_of_bins=1024): - """ - - Distribute a large number of target files over multiple delegated roles - (hashed bins). The metadata files of delegated roles will be nearly - equal in size (i.e., 'list_of_targets' is uniformly distributed by - calculating the target filepath's hash and determing which bin it should - reside in. The updater client will use "lazy bin walk" to find a target - file's hashed bin destination. The parent role lists a range of path - hash prefixes each hashed bin contains. This method is intended for - repositories with a large number of target files, a way of easily - distributing and managing the metadata that lists the targets, and - minimizing the number of metadata files (and their size) downloaded by - the client. See tuf-spec.txt and the following link for more - information: - http://www.python.org/dev/peps/pep-0458/#metadata-scalability - - >>> - >>> - >>> - - - list_of_targets: - The target filepaths of the targets that should be stored in hashed - bins created (i.e., delegated roles). A repository object's - get_filepaths_in_directory() can generate a list of valid target - paths. - - keys_of_hashed_bins: - The initial public keys of the delegated roles. Public keys may be - later added or removed by calling the usual methods of the delegated - Targets object. For example: - repository.targets('000-003').add_verification_key() - - number_of_bins: - The number of delegated roles, or hashed bins, that should be generated - and contain the target file attributes listed in 'list_of_targets'. - 'number_of_bins' must be a power of 2. Each bin may contain a - range of path hash prefixes (e.g., target filepath digests that range - from [000]... - [003]..., where the series of digits in brackets is - considered the hash prefix). - - - tuf.FormatError, if the arguments are improperly formatted. - - tuf.Error, if 'number_of_bins' is not a power of 2, or one of the targets - in 'list_of_targets' is not located under the repository's targets - directory. - - - Delegates multiple target roles from the current parent role. - - - None. - """ - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'tuf.FormatError' if there is a mismatch. - tuf.formats.PATHS_SCHEMA.check_match(list_of_targets) - tuf.formats.ANYKEYLIST_SCHEMA.check_match(keys_of_hashed_bins) - tuf.formats.NUMBINS_SCHEMA.check_match(number_of_bins) - - # Convert 'number_of_bins' to hexadecimal and determine the number of - # hexadecimal digits needed by each hash prefix. Calculate the total - # number of hash prefixes (e.g., 000 - FFF total values) to be spread over - # 'number_of_bins' and strip the first two characters ('0x') from Python's - # representation of hexadecimal values (so that they are not used in the - # calculation of the prefix length.) Example: number_of_bins = 32, - # total_hash_prefixes = 256, and each hashed bin is responsible for 8 hash - # prefixes. Hashed bin roles created = 00-07.json, 08-0f.json, ..., - # f8-ff.json. - prefix_length = len(hex(number_of_bins - 1)[2:]) - total_hash_prefixes = 16 ** prefix_length - - # For simplicity, ensure that 'total_hash_prefixes' (16 ^ n) can be evenly - # distributed over 'number_of_bins' (must be 2 ^ n). 
Each bin will contain - # (total_hash_prefixes / number_of_bins) hash prefixes. - if total_hash_prefixes % number_of_bins != 0: - raise tuf.Error('The "number_of_bins" argument must be a power of 2.') - - logger.info('Creating hashed bin delegations.') - logger.info(repr(len(list_of_targets)) + ' total targets.') - logger.info(repr(number_of_bins) + ' hashed bins.') - logger.info(repr(total_hash_prefixes) + ' total hash prefixes.') - - # Store the target paths that fall into each bin. The digest of the - # target path, reduced to the first 'prefix_length' hex digits, is - # calculated to determine which 'bin_index' it should go. - target_paths_in_bin = {} - for bin_index in six.moves.xrange(total_hash_prefixes): - target_paths_in_bin[bin_index] = [] - - # Assign every path to its bin. Ensure every target is located under the - # repository's targets directory. - for target_path in list_of_targets: - target_path = os.path.abspath(target_path) - if not target_path.startswith(self._targets_directory+os.sep): - raise tuf.Error('A path in the list of targets argument is not' - ' under the repository\'s targets directory: ' + repr(target_path)) - - # Determine the hash prefix of 'target_path' by computing the digest of - # its path relative to the targets directory. Example: - # '{repository_root}/targets/file1.txt' -> 'file1.txt'. - relative_path = target_path[len(self._targets_directory):] - digest_object = tuf.hash.digest(algorithm=HASH_FUNCTION) - digest_object.update(relative_path.encode('utf-8')) - relative_path_hash = digest_object.hexdigest() - relative_path_hash_prefix = relative_path_hash[:prefix_length] - - # 'target_paths_in_bin' store bin indices in base-10, so convert the - # 'relative_path_hash_prefix' base-16 (hex) number to a base-10 (dec) - # number. - bin_index = int(relative_path_hash_prefix, 16) - - # Add the 'target_path' (absolute) to the bin. These target paths are - # later added to the targets of the 'bin_index' role. - target_paths_in_bin[bin_index].append(target_path) - - # Calculate the path hash prefixes of each 'bin_offset' stored in the parent - # role. For example: 'targets/unclaimed/000-003' may list the path hash - # prefixes "000", "001", "002", "003" in the delegations dict of - # 'targets/unclaimed'. - bin_offset = total_hash_prefixes // number_of_bins - - logger.info('Each bin ranges over ' + repr(bin_offset) + ' hash prefixes.') - - # The parent roles will list bin roles starting from "0" to - # 'total_hash_prefixes' in 'bin_offset' increments. The skipped bin roles - # are listed in 'path_hash_prefixes' of 'outer_bin_index'. - for outer_bin_index in six.moves.xrange(0, total_hash_prefixes, bin_offset): - # The bin index is hex padded from the left with zeroes for up to the - # 'prefix_length' (e.g., '000-003'). Ensure the correct hash bin name is - # generated if a prefix range is unneeded. - start_bin = hex(outer_bin_index)[2:].zfill(prefix_length) - end_bin = hex(outer_bin_index+bin_offset-1)[2:].zfill(prefix_length) - if start_bin == end_bin: - bin_rolename = start_bin - - else: - bin_rolename = start_bin + '-' + end_bin - - # 'bin_rolename' may contain a range of target paths, from 'start_bin' to - # 'end_bin'. Determine the total target paths that should be included. - path_hash_prefixes = [] - bin_rolename_targets = [] - - for inner_bin_index in six.moves.xrange(outer_bin_index, outer_bin_index+bin_offset): - # 'inner_bin_rolename' needed in padded hex. For example, "00b". 
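# A worked example of the arithmetic above, assuming number_of_bins = 32:
# prefix_length = len(hex(31)[2:]) = 2, total_hash_prefixes = 16 ** 2 = 256,
# and bin_offset = 256 // 32 = 8, yielding the rolenames '00-07', '08-0f',
# ..., 'f8-ff'. A target path whose digest begins with '0b' then maps to
# bin_index = int('0b', 16) = 11 and is assigned to the '08-0f' role.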
- inner_bin_rolename = hex(inner_bin_index)[2:].zfill(prefix_length)
- path_hash_prefixes.append(inner_bin_rolename)
- bin_rolename_targets.extend(target_paths_in_bin[inner_bin_index])
-
- # Delegate from the "unclaimed" targets role to each 'bin_rolename'
- # (i.e., outer_bin_index).
- self.delegate(bin_rolename, keys_of_hashed_bins,
- list_of_targets=bin_rolename_targets,
- path_hash_prefixes=path_hash_prefixes)
- logger.debug('Delegated from ' + repr(self.rolename) + ' to ' + repr(bin_rolename))
-
-
-
- def add_target_to_bin(self, target_filepath):
- """
- <Purpose>
- Add the fileinfo of 'target_filepath' to the expected hashed bin, if
- the bin is available. The hashed bin should have been created by
- {targets_role}.delegate_hashed_bins(). Assuming the target filepath
- falls under the repository's targets directory, determine the filepath's
- hash prefix, locate the expected bin (if any), and then add the fileinfo
- to the expected bin. Example: 'targets/foo.tar.gz' may be added to
- the 'targets/unclaimed/58-5f.json' role's list of targets by calling this
- method.
-
- <Arguments>
- target_filepath:
- The filepath of the target to be added to a hashed bin. The filepath
- must fall under the repository's targets directory.
-
- <Exceptions>
- tuf.FormatError, if 'target_filepath' is improperly formatted.
-
- tuf.Error, if 'target_filepath' cannot be added to a hashed bin
- (e.g., an invalid target filepath, or the expected hashed bin does not
- exist.)
-
- <Side Effects>
- The fileinfo of 'target_filepath' is added to a hashed bin of this Targets
- object.
-
- <Returns>
- None.
- """
-
- # Do the arguments have the correct format?
- # Ensure the arguments have the appropriate number of objects and object
- # types, and that all dict keys are properly named.
- # Raise 'tuf.FormatError' if there is a mismatch.
- tuf.formats.PATH_SCHEMA.check_match(target_filepath)
-
- return self._locate_and_update_target_in_bin(target_filepath, 'add_target')
-
-
-
- def remove_target_from_bin(self, target_filepath):
- """
- <Purpose>
- Remove the fileinfo of 'target_filepath' from the expected hashed bin, if
- the bin is available. The hashed bin should have been created by
- {targets_role}.delegate_hashed_bins(). Assuming the target filepath
- falls under the repository's targets directory, determine the filepath's
- hash prefix, locate the expected bin (if any), and then remove the
- fileinfo from the expected bin. Example: 'targets/foo.tar.gz' may be
- removed from the '58-5f.json' role's list of targets by
- calling this method.
-
- <Arguments>
- target_filepath:
- The filepath of the target to be removed from a hashed bin. The filepath
- must fall under the repository's targets directory.
-
- <Exceptions>
- tuf.FormatError, if 'target_filepath' is improperly formatted.
-
- tuf.Error, if 'target_filepath' cannot be removed from a hashed bin
- (e.g., an invalid target filepath, or the expected hashed bin does not
- exist.)
-
- <Side Effects>
- The fileinfo of 'target_filepath' is removed from a hashed bin of this
- Targets object.
-
- <Returns>
- None.
- """
-
- # Do the arguments have the correct format?
- # Ensure the arguments have the appropriate number of objects and object
- # types, and that all dict keys are properly named.
- # Raise 'tuf.FormatError' if there is a mismatch.
- tuf.formats.PATH_SCHEMA.check_match(target_filepath)
-
- return self._locate_and_update_target_in_bin(target_filepath, 'remove_target')
-
-
-
- def _locate_and_update_target_in_bin(self, target_filepath, method_name):
- """
- <Purpose>
- Assuming the target filepath falls under the repository's targets
- directory, determine the filepath's hash prefix, locate the expected bin
- (if any), and then call the 'method_name' method of the expected hashed
- bin role.
-
- <Arguments>
- target_filepath:
- The filepath of the target that may be specified in one of the hashed
- bins. 'target_filepath' must fall under the repository's targets
- directory.
-
- method_name:
- A supported method, in string format, of the Targets() class. For
- example, 'add_target' and 'remove_target'. If 'target_filepath' were
- to be manually added or removed from a bin:
-
- repository.targets('58-f7').add_target(target_filepath)
- repository.targets('000-007').remove_target(target_filepath)
-
- <Exceptions>
- tuf.Error, if 'target_filepath' cannot be updated (e.g., an invalid target
- filepath, or the expected hashed bin does not exist.)
-
- <Side Effects>
- The fileinfo of 'target_filepath' is added to or removed from a hashed
- bin of this Targets object.
-
- <Returns>
- None.
- """
-
- # Determine the prefix length of any one of the hashed bins. The prefix
- # length is not stored in the roledb, so it must be determined here by
- # inspecting one of the path hash prefixes listed.
- roleinfo = tuf.roledb.get_roleinfo(self.rolename)
- prefix_length = 0
- delegation = None
-
- # Set 'delegation' if this Targets role has performed any delegations.
- if len(roleinfo['delegations']['roles']):
- delegation = roleinfo['delegations']['roles'][0]
-
- else:
- raise tuf.Error(self.rolename + ' has not delegated to any roles.')
-
- # Set 'prefix_length' if this Targets object has delegated to hashed bins,
- # otherwise raise an exception.
- if 'path_hash_prefixes' in delegation and len(delegation['path_hash_prefixes']):
- prefix_length = len(delegation['path_hash_prefixes'][0])
-
- else:
- raise tuf.Error(self.rolename + ' has not delegated to hashed bins.')
-
- # Ensure the filepath falls under the repository's targets directory.
- filepath = os.path.abspath(target_filepath)
- if not filepath.startswith(self._targets_directory + os.sep):
- raise tuf.Error(repr(filepath) + ' is not under the Repository\'s'
- ' targets directory: ' + repr(self._targets_directory))
-
- # Determine the hash prefix of 'target_path' by computing the digest of
- # its path relative to the targets directory. Example:
- # '{repository_root}/targets/file1.txt' -> '/file1.txt'.
- relative_path = filepath[len(self._targets_directory):]
- digest_object = tuf.hash.digest(algorithm=HASH_FUNCTION)
- digest_object.update(relative_path.encode('utf-8'))
- path_hash = digest_object.hexdigest()
- path_hash_prefix = path_hash[:prefix_length]
-
- # Search for 'path_hash_prefix', and if found, extract the hashed bin's
- # rolename. The hashed bin name is needed so that 'target_filepath' can be
- # added to the Targets object of the hashed bin.
- hashed_bin_name = None
- for delegation in roleinfo['delegations']['roles']:
- if path_hash_prefix in delegation['path_hash_prefixes']:
- hashed_bin_name = delegation['name']
- break
-
- else:
- continue
-
- # 'self._delegated_roles' is keyed by relative rolenames, so update
- # 'hashed_bin_name'.
- if hashed_bin_name is not None:
- hashed_bin_name = hashed_bin_name[len(self.rolename) + 1:]
-
- # 'method_name' should be one of the supported methods of the Targets()
- # class.
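# As an illustration (hypothetical prefix and rolename): with prefix_length
# = 2 and a target path whose digest begins with '5e', the getattr() call
# below resolves to the equivalent of:
#
#   repository.targets('58-5f').add_target(target_filepath)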
- getattr(self._delegated_roles[hashed_bin_name], method_name)(target_filepath)
-
- else:
- raise tuf.Error(target_filepath + ' not found in any of the bins.')
-
-
-
- @property
- def delegations(self):
- """
- <Purpose>
- A getter method that returns the delegations made by this Targets role.
-
- >>>
- >>>
- >>>
-
- <Arguments>
- None.
-
- <Exceptions>
- tuf.UnknownRoleError, if this Targets' rolename does not exist in
- 'tuf.roledb'.
-
- <Side Effects>
- None.
-
- <Returns>
- A list containing the Targets objects of this Targets' delegations.
- """
-
- return list(self._delegated_roles.values())
-
-
-
-
-
-def create_new_repository(repository_directory):
- """
- <Purpose>
- Create a new repository, instantiate barebones metadata for the top-level
- roles, and return a Repository object. On disk, create_new_repository()
- only creates the directories needed to hold the metadata and targets files.
- The repository object returned may be modified to update the newly created
- repository. The methods of the returned object may be called to create
- actual repository files (e.g., repository.write()).
-
- <Arguments>
- repository_directory:
- The directory that will eventually hold the metadata and target files of
- the TUF repository.
-
- <Exceptions>
- tuf.FormatError, if the arguments are improperly formatted.
-
- <Side Effects>
- The 'repository_directory' is created if it does not exist, including its
- metadata and targets sub-directories.
-
- <Returns>
- A 'tuf.repository_tool.Repository' object.
- """
-
- # Does 'repository_directory' have the correct format?
- # Ensure the arguments have the appropriate number of objects and object
- # types, and that all dict keys are properly named.
- # Raise 'tuf.FormatError' if there is a mismatch.
- tuf.formats.PATH_SCHEMA.check_match(repository_directory)
-
- # Set the repository, metadata, and targets directories. These directories
- # are created if they do not exist.
- repository_directory = os.path.abspath(repository_directory)
- metadata_directory = None
- targets_directory = None
-
- # Try to create 'repository_directory' if it does not exist.
- try:
- logger.info('Creating ' + repr(repository_directory))
- os.makedirs(repository_directory)
-
- # 'OSError' raised if the leaf directory already exists or cannot be created.
- # Check for the case where 'repository_directory' has already been created.
- except OSError as e:
- if e.errno == errno.EEXIST:
- pass
-
- else:
- raise
-
- # Set the metadata and targets directories. The metadata directory is a
- # staged one so that the "live" repository is not affected. The
- # staged metadata changes may be moved over to "live" after all updates
- # have been completed.
- metadata_directory = \
- os.path.join(repository_directory, METADATA_STAGED_DIRECTORY_NAME)
- targets_directory = \
- os.path.join(repository_directory, TARGETS_DIRECTORY_NAME)
-
- # Try to create the metadata directory that will hold all of the metadata
- # files, such as 'root.json' and 'snapshot.json'.
- try:
- logger.info('Creating ' + repr(metadata_directory))
- os.mkdir(metadata_directory)
-
- # 'OSError' raised if the leaf directory already exists or cannot be created.
- except OSError as e:
- if e.errno == errno.EEXIST:
- pass
-
- else:
- raise
-
- # Try to create the targets directory that will hold all of the target files.
- try:
- logger.info('Creating ' + repr(targets_directory))
- os.mkdir(targets_directory)
-
- except OSError as e:
- if e.errno == errno.EEXIST:
- pass
-
- else:
- raise
-
- # Create the bare bones repository object, where only the top-level roles
- # have been set and contain default values (e.g., the Root role has a
- # threshold of 1, expires 1 year into the future, etc.)
- repository = Repository(repository_directory, metadata_directory,
- targets_directory)
-
- return repository
-
-
-
-
-
-def load_repository(repository_directory):
- """
- <Purpose>
- Return a repository object containing the contents of metadata files loaded
- from the repository.
-
- <Arguments>
- repository_directory:
-
- <Exceptions>
- tuf.FormatError, if 'repository_directory' or any of the metadata files
- are improperly formatted.
-
- tuf.RepositoryError, if the Root role cannot be found. At a minimum,
- a repository must contain 'root.json'.
-
- <Side Effects>
- All the metadata files found in the repository are loaded and their contents
- stored in a repository_tool.Repository object.
-
- <Returns>
- repository_tool.Repository object.
- """
-
- # Does 'repository_directory' have the correct format?
- # Raise 'tuf.FormatError' if there is a mismatch.
- tuf.formats.PATH_SCHEMA.check_match(repository_directory)
-
- # Load top-level metadata.
- repository_directory = os.path.abspath(repository_directory)
- metadata_directory = os.path.join(repository_directory,
- METADATA_STAGED_DIRECTORY_NAME)
- targets_directory = os.path.join(repository_directory,
- TARGETS_DIRECTORY_NAME)
-
- # The Repository() object that will contain all of the metadata roles found
- # and be returned to the caller.
- repository = Repository(repository_directory, metadata_directory,
- targets_directory)
-
- filenames = repo_lib.get_metadata_filenames(metadata_directory)
-
- # The Root file is always available without a version number (a consistent
- # snapshot) attached to the filename. Store the 'consistent_snapshot' value
- # and read the loaded Root file so that other metadata files may be located.
- consistent_snapshot = False
-
- # Load the metadata of the top-level roles (i.e., Root, Timestamp, Targets,
- # and Snapshot).
- repository, consistent_snapshot = repo_lib._load_top_level_metadata(repository,
- filenames)
-
- # Load delegated targets metadata.
- # Extract the fileinfo of all the role files found in the metadata directory.
- # This information is stored in the 'meta' field of the snapshot
- # metadata object.
- targets_objects = {}
- loaded_metadata = []
- targets_objects['targets'] = repository.targets
- targets_metadata_directory = os.path.join(metadata_directory,
- TARGETS_DIRECTORY_NAME)
- if os.path.exists(targets_metadata_directory) and \
- os.path.isdir(targets_metadata_directory):
- for root, directories, files in os.walk(targets_metadata_directory):
-
- # 'files' here is a list of target file names.
- for basename in files:
- metadata_path = os.path.join(root, basename)
- metadata_name = \
- metadata_path[len(metadata_directory):].lstrip(os.path.sep)
-
- # Strip the version number if 'consistent_snapshot' is True.
- # Example: '10.django.json' --> 'django.json'
- metadata_name, version_number_junk = \
- repo_lib._strip_version_number(metadata_name,
- consistent_snapshot)
-
- if metadata_name.endswith(METADATA_EXTENSION):
- extension_length = len(METADATA_EXTENSION)
- metadata_name = metadata_name[:-extension_length]
-
- else:
- continue
-
- # Keep a record of previously loaded metadata to prevent re-loading
- # duplicate versions. Duplicate versions may occur with
- # 'consistent_snapshot', where the same metadata may be available in
- # multiple files (a different hash is included in each filename).
- if metadata_name in loaded_metadata:
- continue
-
- signable = None
- try:
- signable = tuf.util.load_json_file(metadata_path)
-
- except (ValueError, IOError):
- continue
-
- metadata_object = signable['signed']
-
- # Extract the metadata attributes of 'metadata_name' and update its
- # corresponding roleinfo.
- roleinfo = tuf.roledb.get_roleinfo(metadata_name)
- roleinfo['signatures'].extend(signable['signatures'])
- roleinfo['version'] = metadata_object['version']
- roleinfo['expires'] = metadata_object['expires']
- for filepath, fileinfo in six.iteritems(metadata_object['targets']):
- roleinfo['paths'].update({filepath: fileinfo.get('custom', {})})
- roleinfo['delegations'] = metadata_object['delegations']
-
- if os.path.exists(metadata_path + '.gz'):
- roleinfo['compressions'].append('gz')
-
- # The roleinfo of 'metadata_name' should have been initialized with
- # defaults when it was loaded from its parent role.
- if repo_lib._metadata_is_partially_loaded(metadata_name, signable, roleinfo):
- roleinfo['partial_loaded'] = True
-
- tuf.roledb.update_roleinfo(metadata_name, roleinfo)
- loaded_metadata.append(metadata_name)
-
- # Generate the Targets objects of the delegated roles of
- # 'metadata_name' and update the parent role Targets object.
- new_targets_object = Targets(targets_directory, metadata_name, roleinfo)
- targets_object = \
- targets_objects[tuf.roledb.get_parent_rolename(metadata_name)]
- targets_objects[metadata_name] = new_targets_object
-
- targets_object._delegated_roles[(os.path.basename(metadata_name))] = \
- new_targets_object
-
- # Extract the keys specified in the delegations field of the Targets
- # role. Add 'key_object' to the list of recognized keys. Keys may be
- # shared, so do not raise an exception if 'key_object' has already been
- # added. In contrast to the methods that may add duplicate keys, do not
- # log a warning here as there may be many such duplicate key warnings.
- # The repository maintainer should have also been made aware of the
- # duplicate key when it was added.
- for key_metadata in six.itervalues(metadata_object['delegations']['keys']):
- key_object = tuf.keys.format_metadata_to_key(key_metadata)
- try:
- tuf.keydb.add_key(key_object)
-
- except tuf.KeyAlreadyExistsError:
- pass
-
- # Add the delegated role's initial roleinfo, to be fully populated
- # when its metadata file is next loaded in the os.walk() iteration.
- for role in metadata_object['delegations']['roles']:
- rolename = role['name']
- roleinfo = {'name': role['name'], 'keyids': role['keyids'],
- 'threshold': role['threshold'],
- 'compressions': [''], 'signing_keyids': [],
- 'signatures': [],
- 'paths': {},
- 'partial_loaded': False,
- 'delegations': {'keys': {},
- 'roles': []}}
- tuf.roledb.add_role(rolename, roleinfo)
-
- return repository
-
-
-
-
-
-if __name__ == '__main__':
- # The interactive sessions of the documentation strings can
- # be tested by running repository_tool.py as a standalone module:
- # $ python repository_tool.py.
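# A short usage sketch of the two module-level functions documented above
# (the directory name is hypothetical):
#
#   import tuf.repository_tool as repo_tool
#
#   # Create the staged metadata and targets directories on disk.
#   repository = repo_tool.create_new_repository('path/to/repository')
#
#   # Later, once metadata such as root.json has been written, reload the
#   # repository and its metadata from the same location.
#   repository = repo_tool.load_repository('path/to/repository')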
- import doctest - doctest.testmod() diff --git a/tests/test_indefinite_freeze_attack.py b/tests/test_indefinite_freeze_attack.py index f86a48271e..8c013ef96c 100755 --- a/tests/test_indefinite_freeze_attack.py +++ b/tests/test_indefinite_freeze_attack.py @@ -289,18 +289,18 @@ def test_with_tuf(self): 'password') repository.snapshot.load_signing_key(snapshot_private) - # Expire snapshot in 8s. This should be far enough into the future that we + # Expire snapshot in 10s. This should be far enough into the future that we # haven't reached it before the first refresh validates timestamp expiry. # We want a successful refresh before expiry, then a second refresh after # expiry (which we then expect to raise an exception due to expired # metadata). - expiry_time = time.time() + 8 + expiry_time = time.time() + 10 datetime_object = tuf.formats.unix_timestamp_to_datetime(int(expiry_time)) repository.snapshot.expiration = datetime_object # Now write to the repository. - repository.write() + repository.writeall() # And move the staged metadata to the "live" metadata. shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) @@ -369,7 +369,7 @@ def test_with_tuf(self): expiry_time = time.time() + 1 datetime_object = tuf.formats.unix_timestamp_to_datetime(int(expiry_time)) repository.timestamp.expiration = datetime_object - repository.write() + repository.writeall() # Move the staged metadata to the "live" metadata. shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) diff --git a/tests/test_key_revocation.py b/tests/test_key_revocation_integration.py similarity index 95% rename from tests/test_key_revocation.py rename to tests/test_key_revocation_integration.py index 5399bf3769..529fe4f4e4 100755 --- a/tests/test_key_revocation.py +++ b/tests/test_key_revocation_integration.py @@ -2,7 +2,7 @@ """ - test_key_revocation.py + test_key_revocation_integration.py Vladimir Diaz. @@ -207,7 +207,7 @@ def test_timestamp_key_revocation(self): repository.root.load_signing_key(self.role_keys['root']['private']) repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) repository.timestamp.load_signing_key(self.role_keys['snapshot']['private']) - repository.write() + repository.writeall() # Move the staged metadata to the "live" metadata. @@ -258,7 +258,7 @@ def test_snapshot_key_revocation(self): # Note: we added Timetamp's key to the Snapshot role. repository.snapshot.load_signing_key(self.role_keys['timestamp']['private']) repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) - repository.write() + repository.writeall() # Move the staged metadata to the "live" metadata. @@ -311,7 +311,7 @@ def test_targets_key_revocation(self): repository.targets.load_signing_key(self.role_keys['timestamp']['private']) repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) - repository.write() + repository.writeall() # Move the staged metadata to the "live" metadata. @@ -351,7 +351,6 @@ def test_root_key_revocation(self): # Remove 'root_keyid' and add a new key. Verify that the client detects # the removal and addition of keys to the Root file. 
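# A condensed sketch of the recommended rotation flow exercised below, with
# hypothetical old/new key objects standing in for the test's
# 'self.role_keys' entries (note: the test first omits loading the old root
# key in order to provoke a tuf.BadSignatureError on the client):
#
#   repository.root.remove_verification_key(old_root_pubkey)
#   repository.root.add_verification_key(new_root_pubkey)
#   repository.root.load_signing_key(new_root_privkey)
#   repository.root.load_signing_key(old_root_privkey)  # sign with both keys
#   repository.writeall()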
repository = repo_tool.load_repository(self.repository_directory)
-
repository.root.remove_verification_key(self.role_keys['root']['public'])
repository.root.add_verification_key(self.role_keys['snapshot']['public'])
repository.root.add_verification_key(self.role_keys['targets']['public'])
@@ -364,12 +363,15 @@
repository.root.load_signing_key(self.role_keys['targets']['private'])
repository.root.load_signing_key(self.role_keys['timestamp']['private'])

- # Note: we added Timetamp's key to the Root role.
+ # Note: We added the Snapshot, Targets, and Timestamp keys to the Root role.
+ # The Root's expected private key has not been loaded yet, so that
+ # we can verify that refresh() correctly raises a tuf.BadSignatureError
+ # exception.
repository.snapshot.load_signing_key(self.role_keys['snapshot']['private'])
repository.timestamp.load_signing_key(self.role_keys['timestamp']['private'])
-
- # Root's version number = 2 after the following write().
- repository.write()
+
+ # Root's version number = 2 after the following writeall().
+ repository.writeall()

# Move the staged metadata to the "live" metadata.
shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
@@ -385,56 +387,61 @@
for mirror_exception in exception.mirror_errors.values():
self.assertTrue(isinstance(mirror_exception, tuf.BadSignatureError))

- # Load the previous Root signing key so that the the client can update
- # successfully.
+
repository.root.add_verification_key(self.role_keys['root']['public'])
repository.root.load_signing_key(self.role_keys['root']['private'])
- repository.write()
+
+ # The root, snapshot, and timestamp roles should now be dirty.
+ repository.dirty_roles()
+ repository.write('root', increment_version_number=False)
+ repository.write('snapshot')
+ repository.write('timestamp')

# Move the staged metadata to the "live" metadata.
shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
os.path.join(self.repository_directory, 'metadata'))

- # Root's version number = 3...
+ # Root's version number = 2...
# The client successfully performs a refresh of top-level metadata to get
# the latest changes.
self.repository_updater.refresh()
- self.assertEqual(self.repository_updater.metadata['current']['root']['version'], 3)
+ self.assertEqual(self.repository_updater.metadata['current']['root']['version'], 2)

# Revoke the snapshot and targets keys (added to root) so that multiple
# snapshots are created. Discontinue signing with the old root key now
# that the client has successfully updated (note: the old Root key
# was revoked, but the repository continued signing with it to allow
# the client to update).
+ repository.root.remove_verification_key(self.role_keys['root']['public'])
repository.root.unload_signing_key(self.role_keys['root']['private'])
repository.root.remove_verification_key(self.role_keys['snapshot']['public'])
repository.root.unload_signing_key(self.role_keys['snapshot']['private'])
- repository.write()
+ repository.writeall()

# Move the staged metadata to the "live" metadata.
shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
os.path.join(self.repository_directory, 'metadata'))

- # Root's version number = 4...
+ # Root's version number = 3...
self.repository_updater.refresh() repository.root.remove_verification_key(self.role_keys['targets']['public']) repository.root.unload_signing_key(self.role_keys['targets']['private']) - repository.write() + repository.writeall() # Move the staged metadata to the "live" metadata. shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), os.path.join(self.repository_directory, 'metadata')) - # Root's version number = 5... + # Root's version number = 4... self.repository_updater.refresh() - self.assertEqual(self.repository_updater.metadata['current']['root']['version'], 5) + self.assertEqual(self.repository_updater.metadata['current']['root']['version'], 4) # Verify that the client is able to recognize that a new set of keys have # been added to the Root role. - # First, has 'root`_keyid' been removed? + # First, has 'root_keyid' been removed? root_roleinfo = tuf.roledb.get_roleinfo('root', self.repository_name) self.assertTrue(root_keyid not in root_roleinfo['keyids']) @@ -450,8 +457,7 @@ def _load_role_keys(keystore_directory): # Populating 'self.role_keys' by importing the required public and private # keys of 'tuf/tests/repository_data/'. The role keys are needed when - # modifying the remote repository used by the test cases in this unit test. - + # modifying the remote repository used by the test cases in this unit test. # The pre-generated key files in 'repository_data/keystore' are all encrypted with # a 'password' passphrase. EXPECTED_KEYFILE_PASSWORD = 'password' diff --git a/tests/test_keys.py b/tests/test_keys.py index a59f79f61f..0845457b5f 100755 --- a/tests/test_keys.py +++ b/tests/test_keys.py @@ -15,7 +15,6 @@ Test cases for test_keys.py. - TODO: test case for ed25519 key generation and refactor. """ # Help with Python 3 compatibility, where the print statement is a function, an @@ -99,14 +98,22 @@ def test_format_keyval_to_metadata(self): self.assertRaises(tuf.FormatError, KEYS.format_keyval_to_metadata, 'bad_keytype', keyvalue) + # Test for missing 'public' entry. public = keyvalue['public'] del keyvalue['public'] self.assertRaises(tuf.FormatError, KEYS.format_keyval_to_metadata, keytype, keyvalue) keyvalue['public'] = public - - - + + # Test for missing 'private' entry. + private = keyvalue['private'] + del keyvalue['private'] + self.assertRaises(tuf.FormatError, KEYS.format_keyval_to_metadata, + keytype, keyvalue, private=True) + keyvalue['private'] = private + + + def test_format_rsakey_from_pem(self): pem = self.rsakey_dict['keyval']['public'] rsa_key = KEYS.format_rsakey_from_pem(pem) @@ -327,6 +334,36 @@ def test_decrypt_key(self): KEYS._GENERAL_CRYPTO_LIBRARY = default_general_library + + def test_extract_pem(self): + # Normal case. + private_pem = KEYS.extract_pem(self.rsakey_dict['keyval']['private'], + private_pem=True) + self.assertTrue(tuf.formats.PEMRSA_SCHEMA.matches(private_pem)) + + # Test for an invalid PEM. 
+ pem_header = '-----BEGIN RSA PRIVATE KEY-----' + pem_footer = '-----END RSA PRIVATE KEY-----' + + header_start = private_pem.index(pem_header) + footer_start = private_pem.index(pem_footer, header_start + len(pem_header)) + + missing_header = private_pem[header_start + len(pem_header):footer_start + len(pem_footer)] + missing_footer = private_pem[header_start:footer_start] + #print('missing header: ' + repr(missing_header)) + #print('missing footer: ' + repr(missing_footer)) + + self.assertRaises(tuf.FormatError, KEYS.extract_pem, + 'invalid_pem', private_pem=True) + + self.assertRaises(tuf.FormatError, KEYS.extract_pem, + missing_header, private_pem=True) + + self.assertRaises(tuf.FormatError, KEYS.extract_pem, + missing_footer, private_pem=True) + + + # Run the unit tests. if __name__ == '__main__': unittest.main() diff --git a/tests/test_mix_and_match_attack.py b/tests/test_mix_and_match_attack.py index 9225b43a35..d1644c402a 100755 --- a/tests/test_mix_and_match_attack.py +++ b/tests/test_mix_and_match_attack.py @@ -221,7 +221,7 @@ def test_with_tuf(self): file_object.write('This is role2\'s target file.') repository.targets('role1').add_target(file3_path) - repository.write() + repository.writeall() # Move the staged metadata to the "live" metadata. shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) diff --git a/tests/test_pyca_crypto_keys.py b/tests/test_pyca_crypto_keys.py index 790e021320..998a7d9ecb 100755 --- a/tests/test_pyca_crypto_keys.py +++ b/tests/test_pyca_crypto_keys.py @@ -105,14 +105,15 @@ def test_verify_rsa_signature(self): self.assertEqual(True, valid_signature) # Check for improperly formatted arguments. + self.assertRaises(tuf.FormatError, crypto_keys.verify_rsa_signature, 123, method, + public_rsa, data) + self.assertRaises(tuf.FormatError, crypto_keys.verify_rsa_signature, signature, 123, public_rsa, data) self.assertRaises(tuf.FormatError, crypto_keys.verify_rsa_signature, signature, method, 123, data) - self.assertRaises(tuf.FormatError, crypto_keys.verify_rsa_signature, 123, method, - public_rsa, data) self.assertRaises(tuf.UnknownMethodError, crypto_keys.verify_rsa_signature, signature, @@ -122,8 +123,6 @@ def test_verify_rsa_signature(self): # Check for invalid 'signature', 'public_key', and 'data' arguments. self.assertRaises(tuf.FormatError, crypto_keys.verify_rsa_signature, signature, method, public_rsa, 123) - - self.assertRaises(tuf.CryptoError, crypto_keys.verify_rsa_signature, signature, method, 'bad_key', data) @@ -137,6 +136,8 @@ def test_verify_rsa_signature(self): self.assertEqual(False, crypto_keys.verify_rsa_signature(mismatched_signature, method, public_rsa, data)) + + def test__decrypt(self): # Verify that invalid encrypted file is detected. self.assertRaises(tuf.CryptoError, crypto_keys._decrypt, @@ -145,11 +146,68 @@ def test__decrypt(self): def test_encrypt_key(self): - # Verify that a key argument with a missing private key is rejected. - global public_rsa - + # Normal case. + ed25519_key = {'keytype': 'ed25519', + 'keyid': 'd62247f817883f593cf6c66a5a55292488d457bcf638ae03207dbbba9dbe457d', + 'keyval': {'public': '74addb5ad544a4306b34741bc1175a3613a8d7dc69ff64724243efdec0e301ad', + 'private': '1f26964cc8d4f7ee5f3c5da2fbb7ab35811169573ac367b860a537e47789f8c4'}} + + crypto_keys.encrypt_key(ed25519_key, 'password') + + # Verify that a key with a missing 'private' key is rejected. 
+ del ed25519_key['keyval']['private'] self.assertRaises(tuf.FormatError, crypto_keys.encrypt_key, - public_rsa, 'password') + ed25519_key, 'password') + + + + def test__decrypt_key(self): + ed25519_key = {'keytype': 'ed25519', + 'keyid': 'd62247f817883f593cf6c66a5a55292488d457bcf638ae03207dbbba9dbe457d', + 'keyval': {'public': '74addb5ad544a4306b34741bc1175a3613a8d7dc69ff64724243efdec0e301ad', + 'private': '1f26964cc8d4f7ee5f3c5da2fbb7ab35811169573ac367b860a537e47789f8c4'}} + + encrypted_key = crypto_keys.encrypt_key(ed25519_key, 'password') + crypto_keys.encrypt_key(ed25519_key, 'password') + + salt, iterations, hmac, iv, ciphertext = \ + encrypted_key.split(crypto_keys._ENCRYPTION_DELIMITER) + + encrypted_key_invalid_hmac = encrypted_key.replace(hmac, '123abc') + + self.assertRaises(tuf.CryptoError, crypto_keys._decrypt, + encrypted_key_invalid_hmac, 'password') + + + + def test_create_rsa_public_and_private_from_encrypted_pem(self): + self.assertRaises(tuf.CryptoError, + crypto_keys.create_rsa_public_and_private_from_encrypted_pem, + 'bad_encrypted_key', 'password') + + + + def test_create_rsa_encrypted_pem(self): + global private_rsa + passphrase = 'password' + + # Verify normal case. + encrypted_pem = crypto_keys.create_rsa_encrypted_pem(private_rsa, passphrase) + + self.assertTrue(tuf.formats.PEMRSA_SCHEMA.matches(encrypted_pem)) + + # Test for invalid arguments. + self.assertRaises(tuf.FormatError, crypto_keys.create_rsa_encrypted_pem, + 1, passphrase) + self.assertRaises(tuf.FormatError, crypto_keys.create_rsa_encrypted_pem, + private_rsa, 2) + + self.assertRaises(TypeError, crypto_keys.create_rsa_encrypted_pem, + '', passphrase) + + self.assertRaises(tuf.CryptoError, crypto_keys.create_rsa_encrypted_pem, + 'bad_private_pem', passphrase) + diff --git a/tests/test_replay_attack.py b/tests/test_replay_attack.py index 76b2180bb5..e3cca2175b 100755 --- a/tests/test_replay_attack.py +++ b/tests/test_replay_attack.py @@ -218,7 +218,7 @@ def test_without_tuf(self): # Set an arbitrary expiration so that the repository tool generates a new # version. repository.timestamp.expiration = datetime.datetime(2030, 1, 1, 12, 12) - repository.write() + repository.writeall() # Move the staged metadata to the "live" metadata. shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) @@ -289,7 +289,7 @@ def test_with_tuf(self): # Set an arbitrary expiration so that the repository tool generates a new # version. repository.timestamp.expiration = datetime.datetime(2030, 1, 1, 12, 12) - repository.write() + repository.writeall() # Move the staged metadata to the "live" metadata. shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) diff --git a/tests/test_repository_lib.py b/tests/test_repository_lib.py index 4d798248d0..9ad461664a 100755 --- a/tests/test_repository_lib.py +++ b/tests/test_repository_lib.py @@ -452,7 +452,7 @@ def test_generate_root_metadata(self): # Test improperly formatted arguments. 
self.assertRaises(tuf.FormatError, repo_lib.generate_root_metadata,
- '3', expires, False)
+ '3', expires, False)
self.assertRaises(tuf.FormatError, repo_lib.generate_root_metadata,
1, '3', False)
self.assertRaises(tuf.FormatError, repo_lib.generate_root_metadata,
@@ -900,7 +900,6 @@ def test__generate_and_write_metadata(self):
tuf.roledb.add_role('obsolete_role', targets_roleinfo)

repo_lib._generate_and_write_metadata('obsolete_role', obsolete_metadata,
- True, targets_directory,
metadata_directory, consistent_snapshot=False,
filenames=None,
@@ -961,6 +960,13 @@ def test__load_top_level_metadata(self):
shutil.copytree(os.path.join('repository_data', 'repository', 'targets'),
targets_directory)

+ # Add a duplicate signature to the Root file for testing purposes.
+ root_file = os.path.join(metadata_directory, 'root.json')
+ signable = tuf.util.load_json_file(os.path.join(metadata_directory, 'root.json'))
+ signable['signatures'].append(signable['signatures'][0])
+
+ repo_lib.write_metadata_file(signable, root_file, 8, ['gz'], False)
+
# Remove compressed metadata so that we can test for loading of a
# repository with no compression enabled.
for role_file in os.listdir(metadata_directory):
@@ -972,19 +978,16 @@
repository = repo_tool.create_new_repository(repository_directory)
repo_lib._load_top_level_metadata(repository, filenames)

- # We partially loaded 'role1' via the top-level Targets role. For the
- # purposes of this test case (which only loads top-level metadata and no
- # delegated metadata), remove this role to avoid issues with partially
- # loaded information (e.g., missing 'version' info, signatures, etc.)
- tuf.roledb.remove_role('role1')
-
# Partially write all top-level roles (we increase the threshold of each
# top-level role so that they are flagged as partially written.
repository.root.threshold = repository.root.threshold + 1
repository.snapshot.threshold = repository.snapshot.threshold + 1
repository.targets.threshold = repository.targets.threshold + 1
repository.timestamp.threshold = repository.timestamp.threshold + 1
- repository.write(write_partial=True)
+ repository.write('root')
+ repository.write('snapshot')
+ repository.write('targets')
+ repository.write('timestamp')

repo_lib._load_top_level_metadata(repository, filenames)
@@ -1028,9 +1031,21 @@ def test__remove_invalid_and_duplicate_signatures(self):
tuf.repository_lib._remove_invalid_and_duplicate_signatures(root_signable)
self.assertEqual(len(root_signable), expected_number_of_signatures)

- # Test that invalid keyid are ignored.
+ # Test for an invalid keyid.
root_signable['signatures'][0]['keyid'] = '404'
tuf.repository_lib._remove_invalid_and_duplicate_signatures(root_signable)
+
+ # Re-add a valid signature for the following test condition.
+ root_signable['signatures'].append(new_pss_signature)
+
+ # Test that an exception is not raised if an invalid sig is present,
+ # and that the duplicate key is removed from 'root_signable'.
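# For reference, a signable couples the signed content with its signatures;
# the keyids, method, and sigs below are hypothetical, abbreviated values:
#
#   {'signed': {...},
#    'signatures': [
#        {'keyid': '1a2b...', 'method': 'RSASSA-PSS', 'sig': 'c3d4...'},
#        {'keyid': '1a2b...', 'method': 'RSASSA-PSS', 'sig': 'c3d4...'}]}
#
# _remove_invalid_and_duplicate_signatures() is expected to drop the second,
# duplicate entry and any signature that fails verification.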
+ root_signable['signatures'][0]['sig'] = '4040' + invalid_keyid = root_signable['signatures'][0]['keyid'] + tuf.repository_lib._remove_invalid_and_duplicate_signatures(root_signable) + + for signature in root_signable['signatures']: + self.assertFalse(invalid_keyid == signature['keyid']) diff --git a/tests/test_repository_tool.py b/tests/test_repository_tool.py index 1690057b02..7993af5384 100755 --- a/tests/test_repository_tool.py +++ b/tests/test_repository_tool.py @@ -112,7 +112,7 @@ def test_init(self): - def test_write_and_write_partial(self): + def test_writeall(self): # Test creation of a TUF repository. # # 1. Import public and private keys. @@ -120,9 +120,9 @@ def test_write_and_write_partial(self): # 3. Load signing keys. # 4. Add target files. # 5. Perform delegation. - # 5. write() + # 6. writeall() # - # Copy the target files from 'tuf/tests/repository_data' so that write() + # Copy the target files from 'tuf/tests/repository_data' so that writeall() # has target fileinfo to include in metadata. temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory) targets_directory = os.path.join(temporary_directory, 'repository', @@ -186,9 +186,9 @@ def test_write_and_write_partial(self): repository.targets.add_verification_key(targets_pubkey) repository.snapshot.add_verification_key(snapshot_pubkey) - # Verify that repository.write() fails for insufficient threshold + # Verify that repository.writeall() fails for insufficient threshold # of signatures (default threshold = 1). - self.assertRaises(tuf.UnsignedMetadataError, repository.write) + self.assertRaises(tuf.UnsignedMetadataError, repository.writeall) repository.timestamp.add_verification_key(timestamp_pubkey) @@ -198,9 +198,9 @@ def test_write_and_write_partial(self): repository.targets.load_signing_key(targets_privkey) repository.snapshot.load_signing_key(snapshot_privkey) - # Verify that repository.write() fails for insufficient threshold + # Verify that repository.writeall() fails for insufficient threshold # of signatures (default threshold = 1). - self.assertRaises(tuf.UnsignedMetadataError, repository.write) + self.assertRaises(tuf.UnsignedMetadataError, repository.writeall) repository.timestamp.load_signing_key(timestamp_privkey) @@ -218,7 +218,7 @@ def test_write_and_write_partial(self): # (6) Write repository. repository.targets.compressions = ['gz'] - repository.write() + repository.writeall() # Verify that the expected metadata is written. for role in ['root.json', 'targets.json', 'snapshot.json', 'timestamp.json']: @@ -239,8 +239,9 @@ def test_write_and_write_partial(self): role1_signable = tuf.util.load_json_file(role1_filepath) tuf.formats.check_signable_object_format(role1_signable) - # Verify that an exception is *not* raised for multiple repository.write(). - repository.write() + # Verify that an exception is *not* raised for multiple + # repository.writeall(). + repository.writeall() # Verify that status() does not raise an exception. repository.status() @@ -308,17 +309,14 @@ def test_write_and_write_partial(self): repository.timestamp.unload_signing_key(root_privkey) repository.timestamp.load_signing_key(timestamp_privkey) - # Verify that a write() fails if a repository is loaded and a change + # Verify that a writeall() fails if a repository is loaded and a change # is made to a role. 
repo_tool.load_repository(repository_directory)
repository.timestamp.expiration = datetime.datetime(2030, 1, 1, 12, 0)
- self.assertRaises(tuf.UnsignedMetadataError, repository.write)
+ self.assertRaises(tuf.UnsignedMetadataError, repository.writeall)

- # Verify that a write_partial() is allowed.
- repository.write_partial()
-
- # Next, perform a non-partial write() with consistent snapshots enabled.
+ # Next, perform a writeall() with consistent snapshots enabled.
# Since the timestamp was modified, load its private key.
repository.timestamp.load_signing_key(timestamp_privkey)
@@ -326,27 +324,28 @@
# snapshot modifies the Root metadata, which specifies whether a repository
# supports consistent snapshots. Verify that an exception is raised due to
# the missing signatures of Root and Snapshot.
- self.assertRaises(tuf.UnsignedMetadataError, repository.write,
- False, True)
+ self.assertRaises(tuf.UnsignedMetadataError, repository.writeall, True)

# Load the private keys of Root and Snapshot (new version required since
- # Root has changed.)
+ # Root will change to enable consistent snapshots.)
repository.root.load_signing_key(root_privkey)
+ repository.targets.load_signing_key(targets_privkey)
repository.snapshot.load_signing_key(snapshot_privkey)
+ repository.targets('role1').load_signing_key(role1_privkey)

# Verify that a consistent snapshot can be written and loaded. The
- # 'targets' and 'role1' roles must be be marked as dirty, otherwise
- # write() will not create consistent snapshots for them.
+ # 'targets' and 'role1' roles must be marked as dirty, otherwise writeall()
+ # will not create consistent snapshots for them.
repository.mark_dirty(['targets', 'role1'])
- repository.write(consistent_snapshot=True)
+ repository.writeall(consistent_snapshot=True)

# Verify that the newly written consistent snapshot can be loaded
# successfully.
repo_tool.load_repository(repository_directory)

# Test improperly formatted arguments.
- self.assertRaises(tuf.FormatError, repository.write, 3, False)
- self.assertRaises(tuf.FormatError, repository.write, False, 3)
+ self.assertRaises(tuf.FormatError, repository.writeall, 3, False)
+ self.assertRaises(tuf.FormatError, repository.writeall, False, 3)
@@ -360,10 +359,11 @@ def test_get_filepaths_in_directory(self):
# Verify the expected filenames. get_filepaths_in_directory() returns
# a list of absolute paths.
metadata_files = repo.get_filepaths_in_directory(metadata_directory)
- expected_files = ['root.json', 'root.json.gz', 'targets.json',
- 'targets.json.gz', 'snapshot.json', 'snapshot.json.gz',
- 'timestamp.json', 'timestamp.json.gz', 'role1.json',
- 'role1.json.gz']
+ expected_files = ['1.root.json', '1.root.json.gz', 'root.json',
+ 'root.json.gz', 'targets.json', 'targets.json.gz',
+ 'snapshot.json', 'snapshot.json.gz', 'timestamp.json',
+ 'timestamp.json.gz', 'role1.json', 'role1.json.gz',
+ 'role2.json', 'role2.json.gz']
basenames = []
for filepath in metadata_files:
basenames.append(os.path.basename(filepath))
@@ -555,7 +555,7 @@ def test_compressions(self):

def test_add_verification_key(self):
- # Add verification key and verify with keys() that it was added.
+ # Add verification key and verify that it was added via (role).keys.
key_path = os.path.join('repository_data', 'keystore', 'snapshot_key.pub') key_object = repo_tool.import_ed25519_publickey_from_file(key_path) @@ -563,10 +563,42 @@ def test_add_verification_key(self): keyid = key_object['keyid'] self.assertEqual([keyid], self.metadata.keys) + + expiration = \ + tuf.formats.unix_timestamp_to_datetime(int(time.time() + 86400)) + expiration = expiration.isoformat() + 'Z' + roleinfo = {'keyids': [], 'signing_keyids': [], 'threshold': 1, + 'signatures': [], 'version': 0, + 'consistent_snapshot': False, + 'compressions': [''], 'expires': expiration, + 'partial_loaded': False} + + tuf.roledb.add_role('Root', roleinfo) + tuf.roledb.add_role('Targets', roleinfo) + tuf.roledb.add_role('Snapshot', roleinfo) + tuf.roledb.add_role('Timestamp', roleinfo) + + # Test for different top-level role names. + self.metadata._rolename = 'Targets' + self.metadata.add_verification_key(key_object) + self.metadata._rolename = 'Snapshot' + self.metadata.add_verification_key(key_object) + self.metadata._rolename = 'Timestamp' + self.metadata.add_verification_key(key_object) + # Test for a given 'expires' argument. + expires = datetime.datetime(2030, 1, 1, 12, 0) + self.metadata.add_verification_key(key_object, expires) + + # Test for an expired 'expires'. + expired = datetime.datetime(1984, 1, 1, 12, 0) + self.assertRaises(tuf.Error, + self.metadata.add_verification_key, key_object, expired) + # Test improperly formatted key argument. self.assertRaises(tuf.FormatError, self.metadata.add_verification_key, 3) + self.assertRaises(tuf.FormatError, self.metadata.add_verification_key, key_object, 3) @@ -663,13 +695,22 @@ def test_add_signature(self): root_signable = tuf.util.load_json_file(root_filepath) signatures = root_signable['signatures'] - # Add the first signature from the list, as only need one is needed. + # Add the first signature from the list, as only one is needed. self.metadata.add_signature(signatures[0]) self.assertEqual(signatures, self.metadata.signatures) + # Verify that a signature is added if a 'signatures' entry is not present. + tuf.roledb.create_roledb_from_root_metadata(root_signable['signed']) + del tuf.roledb._roledb_dict['default']['root']['signatures'] + self.metadata._rolename = 'root' + self.metadata.add_signature(signatures[0]) + + # Add a duplicate signature. + self.metadata.add_signature(signatures[0]) # Test improperly formatted signature argument. self.assertRaises(tuf.FormatError, self.metadata.add_signature, 3) + self.assertRaises(tuf.FormatError, self.metadata.add_signature, signatures[0], 3) @@ -948,7 +989,7 @@ def test_delegations(self): threshold = 1 self.targets_object.delegate(rolename, public_keys, list_of_targets, - threshold, backtrack=True, + threshold, terminating=False, restricted_paths=None, path_hash_prefixes=None) @@ -1107,7 +1148,7 @@ def test_delegate(self): path_hash_prefixes = ['e3a3', '8fae', 'd543'] self.targets_object.delegate(rolename, public_keys, list_of_targets, - threshold, backtrack=True, + threshold, terminating=False, restricted_paths=restricted_paths, path_hash_prefixes=path_hash_prefixes) @@ -1116,7 +1157,7 @@ def test_delegate(self): # Try to delegate to a role that has already been delegated. 
self.assertRaises(tuf.Error, self.targets_object.delegate, rolename,
- public_keys, list_of_targets, threshold, backtrack=True,
+ public_keys, list_of_targets, threshold, terminating=False,
restricted_paths=restricted_paths,
path_hash_prefixes=path_hash_prefixes)
@@ -1124,13 +1165,13 @@
self.targets_object.revoke(rolename)
self.assertRaises(tuf.Error, self.targets_object.delegate, rolename,
public_keys, ['non-existent.txt'], threshold,
- backtrack=True, restricted_paths=restricted_paths,
+ terminating=False, restricted_paths=restricted_paths,
path_hash_prefixes=path_hash_prefixes)

# Test for targets that do not exist under the targets directory.
self.assertRaises(tuf.Error, self.targets_object.delegate, rolename,
public_keys, list_of_targets, threshold,
- backtrack=True, restricted_paths=['non-existent.txt'],
+ terminating=False, restricted_paths=['non-existent.txt'],
path_hash_prefixes=path_hash_prefixes)
@@ -1506,7 +1547,7 @@ def test_load_repository(self):
# Verify the expected roles have been loaded. See
# 'tuf/tests/repository_data/repository/'.
expected_roles = \
- ['root', 'targets', 'snapshot', 'timestamp', 'role1']
+ ['root', 'targets', 'snapshot', 'timestamp', 'role1', 'role2']
for role in tuf.roledb.get_rolenames():
self.assertTrue(role in expected_roles)
@@ -1514,7 +1555,7 @@
self.assertTrue(len(repository.targets.keys))
self.assertTrue(len(repository.snapshot.keys))
self.assertTrue(len(repository.timestamp.keys))
- self.assertTrue(len(repository.targets('role1').keys))
+ self.assertEqual(1, repository.targets('role1').version)

# Assumed the targets (tuf/tests/repository_data/) role contains 'file1.txt'
# and 'file2.txt'.
@@ -1537,6 +1578,15 @@



+ def test_dirty_roles(self):
+ original_repository_directory = os.path.join('repository_data',
+ 'repository')
+ repository = repo_tool.load_repository(original_repository_directory)
+
+ # dirty_roles() only logs the list of dirty roles.
+ repository.dirty_roles()
+
+
# Run the test cases.
if __name__ == '__main__':
unittest.main()
diff --git a/tests/test_root_versioning_integration.py b/tests/test_root_versioning_integration.py
new file mode 100755
index 0000000000..28f2c64d6d
--- /dev/null
+++ b/tests/test_root_versioning_integration.py
@@ -0,0 +1,228 @@
+#!/usr/bin/env python
+
+"""
+ <Program Name>
+ test_root_versioning_integration.py
+
+ <Author>
+ Evan Cordell.
+
+ <Started>
+ July 21, 2016.
+
+ <Copyright>
+ See LICENSE for licensing information.
+
+ <Purpose>
+ Test root versioning for efficient root key rotation.
+"""
+
+from __future__ import print_function
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import unicode_literals
+
+import os
+import logging
+import tempfile
+import shutil
+import sys
+
+# 'unittest2' required for testing under Python < 2.7.
+if sys.version_info >= (2, 7):
+ import unittest
+else:
+ import unittest2 as unittest
+
+import tuf
+import tuf.log
+import tuf.formats
+import tuf.roledb
+import tuf.keydb
+import tuf.hash
+import tuf.repository_tool as repo_tool
+
+logger = logging.getLogger('tuf.test_root_versioning')
+
+repo_tool.disable_console_log_messages()
+
+
+class TestRepository(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd())
+
+ @classmethod
+ def tearDownClass(cls):
+ shutil.rmtree(cls.temporary_directory)
+
+ def tearDown(self):
+ tuf.roledb.clear_roledb()
+ tuf.keydb.clear_keydb()
+
+ def test_init(self):
+ # Test normal case.
+ repository = repo_tool.Repository('repository_directory/', + 'metadata_directory/', + 'targets_directory/') + self.assertTrue(isinstance(repository.root, repo_tool.Root)) + self.assertTrue(isinstance(repository.snapshot, repo_tool.Snapshot)) + self.assertTrue(isinstance(repository.timestamp, repo_tool.Timestamp)) + self.assertTrue(isinstance(repository.targets, repo_tool.Targets)) + + # Test improperly formatted arguments. + self.assertRaises(tuf.FormatError, repo_tool.Repository, 3, + 'metadata_directory/', 'targets_directory') + self.assertRaises(tuf.FormatError, repo_tool.Repository, + 'repository_directory', 3, 'targets_directory') + self.assertRaises(tuf.FormatError, repo_tool.Repository, + 'repository_directory', 'metadata_directory', 3) + + + + def test_root_role_versioning(self): + # Test root role versioning + # + # 1. Import public and private keys. + # 2. Add verification keys. + # 3. Load signing keys. + # 4. Add target files. + # 5. Perform delegation. + # 6. writeall() + # + # Copy the target files from 'tuf/tests/repository_data' so that writeall() + # has target fileinfo to include in metadata. + temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory) + targets_directory = os.path.join(temporary_directory, 'repository', + repo_tool.TARGETS_DIRECTORY_NAME) + original_targets_directory = os.path.join('repository_data', + 'repository', 'targets') + shutil.copytree(original_targets_directory, targets_directory) + + # In this case, create_new_repository() creates the 'repository/' + # sub-directory in 'temporary_directory' if it does not exist. + repository_directory = os.path.join(temporary_directory, 'repository') + metadata_directory = os.path.join(repository_directory, + repo_tool.METADATA_STAGED_DIRECTORY_NAME) + repository = repo_tool.create_new_repository(repository_directory) + + + + + # (1) Load the public and private keys of the top-level roles, and one + # delegated role. + keystore_directory = os.path.join('repository_data', 'keystore') + + # Load the public keys. + root_pubkey_path = os.path.join(keystore_directory, 'root_key.pub') + targets_pubkey_path = os.path.join(keystore_directory, 'targets_key.pub') + snapshot_pubkey_path = os.path.join(keystore_directory, 'snapshot_key.pub') + timestamp_pubkey_path = os.path.join(keystore_directory, 'timestamp_key.pub') + role1_pubkey_path = os.path.join(keystore_directory, 'delegation_key.pub') + + root_pubkey = repo_tool.import_rsa_publickey_from_file(root_pubkey_path) + targets_pubkey = repo_tool.import_ed25519_publickey_from_file(targets_pubkey_path) + snapshot_pubkey = \ + repo_tool.import_ed25519_publickey_from_file(snapshot_pubkey_path) + timestamp_pubkey = \ + repo_tool.import_ed25519_publickey_from_file(timestamp_pubkey_path) + role1_pubkey = repo_tool.import_ed25519_publickey_from_file(role1_pubkey_path) + + # Load the private keys. 
+ root_privkey_path = os.path.join(keystore_directory, 'root_key') + targets_privkey_path = os.path.join(keystore_directory, 'targets_key') + snapshot_privkey_path = os.path.join(keystore_directory, 'snapshot_key') + timestamp_privkey_path = os.path.join(keystore_directory, 'timestamp_key') + role1_privkey_path = os.path.join(keystore_directory, 'delegation_key') + + root_privkey = \ + repo_tool.import_rsa_privatekey_from_file(root_privkey_path, 'password') + targets_privkey = \ + repo_tool.import_ed25519_privatekey_from_file(targets_privkey_path, 'password') + snapshot_privkey = \ + repo_tool.import_ed25519_privatekey_from_file(snapshot_privkey_path, + 'password') + timestamp_privkey = \ + repo_tool.import_ed25519_privatekey_from_file(timestamp_privkey_path, + 'password') + role1_privkey = \ + repo_tool.import_ed25519_privatekey_from_file(role1_privkey_path, + 'password') + + + # (2) Add top-level verification keys. + repository.root.add_verification_key(root_pubkey) + repository.targets.add_verification_key(targets_pubkey) + repository.snapshot.add_verification_key(snapshot_pubkey) + repository.timestamp.add_verification_key(timestamp_pubkey) + + + # (3) Load top-level signing keys. + repository.root.load_signing_key(root_privkey) + repository.targets.load_signing_key(targets_privkey) + repository.snapshot.load_signing_key(snapshot_privkey) + repository.timestamp.load_signing_key(timestamp_privkey) + + # (4) Add target files. + target1 = os.path.join(targets_directory, 'file1.txt') + target2 = os.path.join(targets_directory, 'file2.txt') + target3 = os.path.join(targets_directory, 'file3.txt') + repository.targets.add_target(target1) + repository.targets.add_target(target2) + + + # (5) Perform delegation. + repository.targets.delegate('role1', [role1_pubkey], [target3]) + repository.targets('role1').load_signing_key(role1_privkey) + + # (6) Write repository. + repository.targets.compressions = ['gz'] + repository.writeall() + + self.assertTrue(os.path.exists(os.path.join(metadata_directory, 'root.json'))) + self.assertTrue(os.path.exists(os.path.join(metadata_directory, '1.root.json'))) + + + # Verify that the expected metadata is written. + root_filepath = os.path.join(metadata_directory, 'root.json') + root_1_filepath = os.path.join(metadata_directory, '1.root.json') + root_2_filepath = os.path.join(metadata_directory, '2.root.json') + old_root_signable = tuf.util.load_json_file(root_filepath) + root_1_signable = tuf.util.load_json_file(root_1_filepath) + + # Make a change to the root keys + repository.root.add_verification_key(targets_pubkey) + repository.root.load_signing_key(targets_privkey) + repository.root.threshold = 2 + repository.writeall() + + new_root_signable = tuf.util.load_json_file(root_filepath) + root_2_signable = tuf.util.load_json_file(root_2_filepath) + + for role_signable in [old_root_signable, new_root_signable, root_1_signable, root_2_signable]: + # Raise 'tuf.FormatError' if 'role_signable' is an invalid signable. 
+ tuf.formats.check_signable_object_format(role_signable) + + # Verify contents of versioned roots + self.assertEqual(old_root_signable, root_1_signable) + self.assertEqual(new_root_signable, root_2_signable) + + self.assertEqual(root_1_signable['signed']['version'], 1) + self.assertEqual(root_2_signable['signed']['version'], 2) + + repository.root.remove_verification_key(root_pubkey) + repository.root.unload_signing_key(root_privkey) + repository.root.threshold = 2 + + # Errors, not enough signing keys to satisfy the old threshold. + self.assertRaises(tuf.UnsignedMetadataError, repository.writeall) + + # No error, write() ignores root's threshold and allows it to be written + # to disk partially signed. + repository.write('root') + + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_sig.py b/tests/test_sig.py index 2ac541fd43..5e0394458e 100755 --- a/tests/test_sig.py +++ b/tests/test_sig.py @@ -50,17 +50,24 @@ class TestSig(unittest.TestCase): def setUp(self): pass - def tearDown(self): - pass + tuf.roledb.clear_roledb() + tuf.keydb.clear_keydb() def test_get_signature_status_no_role(self): - signable = {'signed' : 'test', 'signatures' : []} + signable = {'signed': 'test', 'signatures': []} - # A valid, but empty signature status + # A valid, but empty signature status. sig_status = tuf.sig.get_signature_status(signable) self.assertTrue(tuf.formats.SIGNATURESTATUS_SCHEMA.matches(sig_status)) + + self.assertEqual(0, sig_status['threshold']) + self.assertEqual([], sig_status['good_sigs']) + self.assertEqual([], sig_status['bad_sigs']) + self.assertEqual([], sig_status['unknown_sigs']) + self.assertEqual([], sig_status['untrusted_sigs']) + self.assertEqual([], sig_status['unknown_method_sigs']) # A valid signable, but non-existent role argument. self.assertRaises(tuf.UnknownRoleError, tuf.sig.get_signature_status, @@ -74,19 +81,9 @@ def test_get_signature_status_no_role(self): tuf.keydb.add_key(KEYS[0]) - # No specific role we're considering. - sig_status = tuf.sig.get_signature_status(signable, None) - - # Non-existent role. - self.assertRaises(tuf.UnknownRoleError, tuf.sig.get_signature_status, - signable, 'unknown_role') - - self.assertEqual(0, sig_status['threshold']) - self.assertEqual([KEYS[0]['keyid']], sig_status['good_sigs']) - self.assertEqual([], sig_status['bad_sigs']) - self.assertEqual([], sig_status['unknown_sigs']) - self.assertEqual([], sig_status['untrusted_sigs']) - self.assertEqual([], sig_status['unknown_method_sigs']) + # Improperly formatted role. + self.assertRaises(tuf.FormatError, tuf.sig.get_signature_status, + signable, 1) # Not allowed to call verify() without having specified a role.
args = (signable, None) @@ -163,11 +160,12 @@ def test_get_signature_status_single_key(self): signable['signatures'].append(tuf.keys.create_signature( KEYS[0], signable['signed'])) - tuf.keydb.add_key(KEYS[0]) threshold = 1 roleinfo = tuf.formats.make_role_metadata( [KEYS[0]['keyid']], threshold) + tuf.roledb.add_role('Root', roleinfo) + tuf.keydb.add_key(KEYS[0]) sig_status = tuf.sig.get_signature_status(signable, 'Root') diff --git a/tests/test_slow_retrieval_attack.py b/tests/test_slow_retrieval_attack.py index 9be066b259..eb999d8bc1 100755 --- a/tests/test_slow_retrieval_attack.py +++ b/tests/test_slow_retrieval_attack.py @@ -186,7 +186,7 @@ def setUp(self): repository.snapshot.load_signing_key(snapshot_private) repository.timestamp.load_signing_key(timestamp_private) - repository.write() + repository.writeall() # Move the staged metadata to the "live" metadata. shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) diff --git a/tests/test_updater.py b/tests/test_updater.py index 88b5a6f270..03a7a5bde5 100755 --- a/tests/test_updater.py +++ b/tests/test_updater.py @@ -744,7 +744,7 @@ def test_3__update_metadata_if_changed(self): repository.targets.load_signing_key(self.role_keys['targets']['private']) repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) - repository.write() + repository.writeall() # Move the staged metadata to the "live" metadata. shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) @@ -816,7 +816,7 @@ def test_4_refresh(self): repository.targets.load_signing_key(self.role_keys['targets']['private']) repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) - repository.write() + repository.writeall() # Move the staged metadata to the "live" metadata. shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) @@ -870,7 +870,7 @@ def test_4__refresh_targets_metadata(self): self.repository_updater._refresh_targets_metadata(refresh_all_delegated_roles=True) # Verify that client's metadata files were refreshed successfully. - self.assertEqual(len(self.repository_updater.metadata['current']), 5) + self.assertEqual(len(self.repository_updater.metadata['current']), 6) # Test for compressed metadata roles. 
self.repository_updater.metadata['current']['snapshot']['meta']['targets.json.gz'] = \ @@ -997,18 +997,18 @@ def test_6_target(self): repository = repo_tool.load_repository(self.repository_directory) - repository.targets.delegate('role2', [self.role_keys['targets']['public']], + repository.targets.delegate('role3', [self.role_keys['targets']['public']], [], restricted_paths=[foo_pattern]) - repository.targets.delegate('role3', [self.role_keys['targets']['public']], + repository.targets.delegate('role4', [self.role_keys['targets']['public']], [foo_package], restricted_paths=[foo_pattern]) repository.targets.load_signing_key(self.role_keys['targets']['private']) - repository.targets('role2').load_signing_key(self.role_keys['targets']['private']) repository.targets('role3').load_signing_key(self.role_keys['targets']['private']) + repository.targets('role4').load_signing_key(self.role_keys['targets']['private']) repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) - repository.write() + repository.writeall() # Move the staged metadata to the "live" metadata. shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) @@ -1027,21 +1027,21 @@ def test_6_target(self): # return a 'tuf.UnknownTargetError' exception. repository = repo_tool.load_repository(self.repository_directory) - repository.targets.revoke('role2') repository.targets.revoke('role3') + repository.targets.revoke('role4') - # Ensure we delegate in trusted order (i.e., 'role2' has higher priority.) + # Ensure we delegate in trusted order (i.e., 'role3' has higher priority). - repository.targets.delegate('role2', [self.role_keys['targets']['public']], - [], backtrack=False, restricted_paths=[foo_pattern]) repository.targets.delegate('role3', [self.role_keys['targets']['public']], + [], terminating=True, restricted_paths=[foo_pattern]) + repository.targets.delegate('role4', [self.role_keys['targets']['public']], [foo_package], restricted_paths=[foo_pattern]) - repository.targets('role2').load_signing_key(self.role_keys['targets']['private']) repository.targets('role3').load_signing_key(self.role_keys['targets']['private']) + repository.targets('role4').load_signing_key(self.role_keys['targets']['private']) repository.targets.load_signing_key(self.role_keys['targets']['private']) repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) - repository.write() + repository.writeall() # Move the staged metadata to the "live" metadata. shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) @@ -1108,7 +1108,7 @@ def test_6_download_target(self): repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) - repository.write(consistent_snapshot=True) + repository.writeall(consistent_snapshot=True) # Move the staged metadata to the "live" metadata. shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) @@ -1233,7 +1233,7 @@ def test_7_updated_targets(self): repository.targets.load_signing_key(self.role_keys['targets']['private']) repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) - repository.write() + repository.writeall() # Move the staged metadata to the "live" metadata.
shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) @@ -1278,7 +1278,7 @@ def test_8_remove_obsolete_targets(self): repository.targets.load_signing_key(self.role_keys['targets']['private']) repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) - repository.write() + repository.writeall() # Move the staged metadata to the "live" metadata. shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) diff --git a/tests/test_updater_root_rotation_integration.py b/tests/test_updater_root_rotation_integration.py new file mode 100755 index 0000000000..9ae2d6b1f1 --- /dev/null +++ b/tests/test_updater_root_rotation_integration.py @@ -0,0 +1,367 @@ +#!/usr/bin/env python + +""" +<Program Name> + test_updater_root_rotation_integration.py + +<Author> + Evan Cordell. + +<Started> + August 8, 2016. + +<Copyright> + See LICENSE for licensing information. + +<Purpose> + 'test_updater_root_rotation_integration.py' provides a collection of methods that test + root key rotation in the example client. + + + Test cases here should follow a specific order (i.e., independent methods are + tested before dependent methods). More accurately, least dependent methods + are tested before most dependent methods. There is no reason to rewrite or + construct other methods that replicate already-tested methods solely for + testing purposes. This is possible because the 'unittest.TestCase' class + guarantees the order of unit tests. The 'test_something_A' method would + be tested before 'test_something_B'. To ensure the expected order of tests, + a number is placed after 'test' and before the method's name, like so: + 'test_1_check_directory'. The number is a measure of dependence, where 1 is + less dependent than 2. +""" + +# Help with Python 3 compatibility, where the print statement is a function, an +# implicit relative import is invalid, and the '/' operator performs true +# division. Example: print 'hello world' raises a 'SyntaxError' exception. +from __future__ import print_function +from __future__ import absolute_import +from __future__ import division +from __future__ import unicode_literals + +import os +import time +import shutil +import copy +import tempfile +import logging +import random
+import subprocess +import sys + +# 'unittest2' required for testing under Python < 2.7. +if sys.version_info >= (2, 7): + import unittest + +else: + import unittest2 as unittest + +import tuf +import tuf.util +import tuf.conf +import tuf.log +import tuf.formats +import tuf.keydb +import tuf.roledb +import tuf.repository_tool as repo_tool +import tuf.unittest_toolbox as unittest_toolbox +import tuf.client.updater as updater +import six + +logger = logging.getLogger('tuf.test_updater_root_rotation_integration') +repo_tool.disable_console_log_messages() + + +class TestUpdater(unittest_toolbox.Modified_TestCase): + + @classmethod + def setUpClass(cls): + # setUpClass() is called before tests in an individual class are executed. + + # Create a temporary directory to store the repository, metadata, and target + # files. 'temporary_directory' must be deleted in tearDownClass() so that + # temporary files are always removed, even when exceptions occur. + cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd()) + + # Launch a SimpleHTTPServer (serves files in the current directory). + # Test cases will request metadata and target files that have been + # pre-generated in 'tuf/tests/repository_data', which will be served + # by the SimpleHTTPServer launched here.
The test cases of 'test_updater.py' + # assume the pre-generated metadata files have a specific structure, such + # as a delegated role 'targets/role1', three target files, five key files, + # etc. + cls.SERVER_PORT = random.randint(30000, 45000) + command = ['python', 'simple_server.py', str(cls.SERVER_PORT)] + cls.server_process = subprocess.Popen(command, stderr=subprocess.PIPE) + logger.info('\n\tServer process started.') + logger.info('\tServer process id: '+str(cls.server_process.pid)) + logger.info('\tServing on port: '+str(cls.SERVER_PORT)) + cls.url = 'http://localhost:'+str(cls.SERVER_PORT) + os.path.sep + + # NOTE: The following error is raised if a delay is not applied: + # <urlopen error [Errno 111] Connection refused> + time.sleep(1) + + + + @classmethod + def tearDownClass(cls): + # tearDownClass() is called after all the tests in this class have run. + # http://docs.python.org/2/library/unittest.html#class-and-module-fixtures + + # Remove the temporary repository directory, which should contain all the + # metadata, targets, and key files generated for the test cases. + shutil.rmtree(cls.temporary_directory) + + # Kill the SimpleHTTPServer process. + if cls.server_process.returncode is None: + logger.info('\tServer process ' + str(cls.server_process.pid) + ' terminated.') + cls.server_process.kill() + + + + def setUp(self): + # We are inheriting from a custom class. + unittest_toolbox.Modified_TestCase.setUp(self) + + # Copy the original repository files provided in the test folder so that + # any modifications made to repository files are restricted to the copies. + # The 'repository_data' directory is expected to exist in 'tuf.tests/'. + original_repository_files = os.path.join(os.getcwd(), 'repository_data') + temporary_repository_root = \ + self.make_temp_directory(directory=self.temporary_directory) + + # The original repository, keystore, and client directories will be copied + # for each test case. + original_repository = os.path.join(original_repository_files, 'repository') + original_keystore = os.path.join(original_repository_files, 'keystore') + original_client = os.path.join(original_repository_files, 'client') + + # Save references to the often-needed client repository directories. + # Test cases need these references to access metadata and target files. + self.repository_directory = \ + os.path.join(temporary_repository_root, 'repository') + self.keystore_directory = \ + os.path.join(temporary_repository_root, 'keystore') + self.client_directory = os.path.join(temporary_repository_root, 'client') + self.client_metadata = os.path.join(self.client_directory, 'metadata') + self.client_metadata_current = os.path.join(self.client_metadata, 'current') + self.client_metadata_previous = \ + os.path.join(self.client_metadata, 'previous') + + # Copy the original 'repository', 'client', and 'keystore' directories + # to the temporary repository the test cases can use. + shutil.copytree(original_repository, self.repository_directory) + shutil.copytree(original_client, self.client_directory) + shutil.copytree(original_keystore, self.keystore_directory) + + # 'path/to/tmp/repository' -> 'localhost:8001/tmp/repository'. + repository_basepath = self.repository_directory[len(os.getcwd()):] + url_prefix = \ + 'http://localhost:' + str(self.SERVER_PORT) + repository_basepath + + # Setting 'tuf.conf.repository_directory' with the temporary client + # directory copied from the original repository files.
+ tuf.conf.repository_directory = self.client_directory + + self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix, + 'metadata_path': 'metadata', + 'targets_path': 'targets', + 'confined_target_dirs': ['']}} + + # Creating a repository instance. The test cases will use this client + # updater to refresh metadata, fetch target files, etc. + self.repository_name = 'test_repository' + self.repository_updater = updater.Updater(self.repository_name, + self.repository_mirrors) + + # Metadata role keys are needed by the test cases to make changes to the + # repository (e.g., adding a new target file to 'targets.json' and then + # requesting a refresh()). + self.role_keys = _load_role_keys(self.keystore_directory) + + + + def tearDown(self): + # We are inheriting from a custom class. + unittest_toolbox.Modified_TestCase.tearDown(self) + tuf.roledb.clear_roledb(clear_all=True) + tuf.keydb.clear_keydb(clear_all=True) + + + + + # UNIT TESTS. + def test_root_rotation(self): + repository = repo_tool.load_repository(self.repository_directory) + repository.root.threshold = 2 + + repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) + repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) + + # Errors, not enough signing keys to satisfy root's threshold. + self.assertRaises(tuf.UnsignedMetadataError, repository.writeall) + + repository.root.add_verification_key(self.role_keys['role1']['public']) + repository.root.load_signing_key(self.role_keys['root']['private']) + repository.root.load_signing_key(self.role_keys['role1']['private']) + repository.writeall() + + repository.root.add_verification_key(self.role_keys['snapshot']['public']) + repository.root.load_signing_key(self.role_keys['snapshot']['private']) + repository.root.threshold = 3 + repository.writeall() + + # Move the staged metadata to the "live" metadata. + shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) + shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), + os.path.join(self.repository_directory, 'metadata')) + + self.repository_updater.refresh() + + + def test_root_rotation_missing_keys(self): + repository = repo_tool.load_repository(self.repository_directory) + + # A partially written root.json (threshold = 1, and not signed in this + # case) causes an invalid root chain later. + repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) + repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) + repository.write('root') + repository.write('snapshot') + repository.write('timestamp') + + # Move the staged metadata to the "live" metadata. + shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) + shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), + os.path.join(self.repository_directory, 'metadata')) + + # Create a new, valid root.json. + repository.root.threshold = 2 + repository.root.add_verification_key(self.role_keys['role1']['public']) + repository.root.load_signing_key(self.role_keys['root']['private']) + repository.root.load_signing_key(self.role_keys['role1']['private']) + + repository.writeall() + + repository.root.add_verification_key(self.role_keys['snapshot']['public']) + repository.root.load_signing_key(self.role_keys['snapshot']['private']) + repository.root.threshold = 3 + repository.writeall() + + # Move the staged metadata to the "live" metadata.
+ shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) + shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), + os.path.join(self.repository_directory, 'metadata')) + + try: + self.repository_updater.refresh() + + except tuf.NoWorkingMirrorError as exception: + for mirror_url, mirror_error in six.iteritems(exception.mirror_errors): + url_prefix = self.repository_mirrors['mirror1']['url_prefix'] + url_file = os.path.join(url_prefix, 'metadata', '2.root.json') + + # Verify that '2.root.json' is the culprit. + self.assertEqual(url_file, mirror_url) + self.assertTrue(isinstance(mirror_error, tuf.BadSignatureError)) + + + + def test_root_rotation_unmet_threshold(self): + repository = repo_tool.load_repository(self.repository_directory) + + # Add verification keys. + repository.root.add_verification_key(self.role_keys['root']['public']) + repository.root.add_verification_key(self.role_keys['role1']['public']) + repository.targets.add_verification_key(self.role_keys['targets']['public']) + repository.snapshot.add_verification_key(self.role_keys['snapshot']['public']) + repository.timestamp.add_verification_key(self.role_keys['timestamp']['public']) + repository.snapshot.load_signing_key(self.role_keys['snapshot']['private']) + repository.timestamp.load_signing_key(self.role_keys['timestamp']['private']) + + # Add signing keys. + repository.root.load_signing_key(self.role_keys['root']['private']) + repository.root.load_signing_key(self.role_keys['role1']['private']) + + # Set the root threshold. + repository.root.threshold = 2 + repository.writeall() + + # Add a new verification key. + repository.root.add_verification_key(self.role_keys['snapshot']['public']) + + # Remove one of the original verification keys and unload its signing key. + repository.root.remove_verification_key(self.role_keys['role1']['public']) + repository.root.unload_signing_key(self.role_keys['role1']['private']) + + # Set a new threshold. + repository.root.threshold = 1 + + repository.writeall() + + # Move the staged metadata to the "live" metadata. + shutil.rmtree(os.path.join(self.repository_directory, 'metadata')) + shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'), + os.path.join(self.repository_directory, 'metadata')) + + self.assertRaises(tuf.NoWorkingMirrorError, self.repository_updater.refresh) + + + +def _load_role_keys(keystore_directory): + + # Populating 'role_keys' by importing the required public and private + # keys of 'tuf/tests/repository_data/'. The role keys are needed when + # modifying the remote repository used by the test cases in this unit test. + + # The pre-generated key files in 'repository_data/keystore' are all encrypted with + # a 'password' passphrase. + EXPECTED_KEYFILE_PASSWORD = 'password' + + # Store and return the cryptography keys of the top-level roles, including 1 + # delegated role. + role_keys = {} + + root_key_file = os.path.join(keystore_directory, 'root_key') + targets_key_file = os.path.join(keystore_directory, 'targets_key') + snapshot_key_file = os.path.join(keystore_directory, 'snapshot_key') + timestamp_key_file = os.path.join(keystore_directory, 'timestamp_key') + delegation_key_file = os.path.join(keystore_directory, 'delegation_key') + + role_keys = {'root': {}, 'targets': {}, 'snapshot': {}, 'timestamp': {}, + 'role1': {}} + + # Import the top-level and delegated role public keys.
+ role_keys['root']['public'] = \ + repo_tool.import_rsa_publickey_from_file(root_key_file+'.pub') + role_keys['targets']['public'] = \ + repo_tool.import_ed25519_publickey_from_file(targets_key_file+'.pub') + role_keys['snapshot']['public'] = \ + repo_tool.import_ed25519_publickey_from_file(snapshot_key_file+'.pub') + role_keys['timestamp']['public'] = \ + repo_tool.import_ed25519_publickey_from_file(timestamp_key_file+'.pub') + role_keys['role1']['public'] = \ + repo_tool.import_ed25519_publickey_from_file(delegation_key_file+'.pub') + + # Import the private keys of the top-level and delegated roles. + role_keys['root']['private'] = \ + repo_tool.import_rsa_privatekey_from_file(root_key_file, + EXPECTED_KEYFILE_PASSWORD) + role_keys['targets']['private'] = \ + repo_tool.import_ed25519_privatekey_from_file(targets_key_file, + EXPECTED_KEYFILE_PASSWORD) + role_keys['snapshot']['private'] = \ + repo_tool.import_ed25519_privatekey_from_file(snapshot_key_file, + EXPECTED_KEYFILE_PASSWORD) + role_keys['timestamp']['private'] = \ + repo_tool.import_ed25519_privatekey_from_file(timestamp_key_file, + EXPECTED_KEYFILE_PASSWORD) + role_keys['role1']['private'] = \ + repo_tool.import_ed25519_privatekey_from_file(delegation_key_file, + EXPECTED_KEYFILE_PASSWORD) + + return role_keys + + +if __name__ == '__main__': + unittest.main() diff --git a/tuf/__init__.py b/tuf/__init__.py index b950770860..dc1639a3ba 100755 --- a/tuf/__init__.py +++ b/tuf/__init__.py @@ -191,7 +191,7 @@ def __init__(self, metadata_role_name): self.metadata_role_name = metadata_role_name def __str__(self): - return repr(self.metadata_role_name) + ' metadata has bad signature.' + return repr(self.metadata_role_name) + ' metadata has a bad signature.' diff --git a/tuf/client/updater.py b/tuf/client/updater.py index 76e0a94ecf..4be5a891ec 100755 --- a/tuf/client/updater.py +++ b/tuf/client/updater.py @@ -650,15 +650,8 @@ def refresh(self, unsafely_update_root_if_necessary=True): # _update_metadata() calls below do NOT perform an update if there # is insufficient trusted signatures for the specified metadata. # Raise 'tuf.NoWorkingMirrorError' if an update fails. - - # Is the Root role expired? When the top-level roles are initially loaded - # from disk, their expiration is not checked to allow their updating when - # requested (and give the updater the chance to continue, rather than always - # failing with an expired metadata error.) If - # 'unsafely_update_root_if_necessary' is True, update an expired Root role - # now. Updating the other top-level roles, regardless of their validity, - # should only occur if the root of trust is up-to-date. root_metadata = self.metadata['current']['root'] + try: self._ensure_not_expired(root_metadata, 'root') @@ -666,72 +659,83 @@ def refresh(self, unsafely_update_root_if_necessary=True): # Raise 'tuf.NoWorkingMirrorError' if a valid (not expired, properly # signed, and valid metadata) 'root.json' cannot be installed. if unsafely_update_root_if_necessary: - message = \ - 'Expired Root metadata was loaded from disk. Try to update it now.' - logger.info(message) - self._update_metadata('root', DEFAULT_ROOT_UPPERLENGTH) + logger.info('Expired Root metadata was loaded from disk.' + ' Try to update it now.' ) # The caller explicitly requested not to unsafely fetch an expired Root. 
else: logger.info('An expired Root metadata was loaded and must be updated.') raise - # If an exception is raised during the metadata update attempts, we will - # attempt to update root metadata once by recursing with a special argument - # (unsafely_update_root_if_necessary) to avoid further recursion. - + # TODO: How should the latest root metadata be verified? According to the + # currently trusted root keys? What if all of the currently trusted + # root keys have since been revoked by the latest metadata? Alternatively, + # do we blindly trust the downloaded root metadata here? + self._update_root_metadata(root_metadata) + # Use default but sane information for timestamp metadata, and do not # require strict checks on its required length. - try: - self._update_metadata('timestamp', DEFAULT_TIMESTAMP_UPPERLENGTH) - self._update_metadata_if_changed('snapshot', - referenced_metadata='timestamp') - self._update_metadata_if_changed('root') - self._update_metadata_if_changed('targets') - - # There are two distinct error scenarios that can rise from the - # _update_metadata_if_changed calls in the try block above: - # - # - tuf.NoWorkingMirrorError: - # - # If a change to a metadata file IS detected in an - # _update_metadata_if_changed call, but we are unable to download a - # valid (not expired, properly signed, valid) version of that metadata - # file, a tuf.NoWorkingMirrorError rises to this point. - # - # - tuf.ExpiredMetadataError: - # - # If, on the other hand, a change to a metadata file IS NOT detected - # in a given _update_metadata_if_changed call, but we observe that the - # version of the metadata file we have on hand is now expired, a - # tuf.ExpiredMetadataError exception rises to this point. - # - except tuf.NoWorkingMirrorError: - if unsafely_update_root_if_necessary: - logger.info('Valid top-level metadata cannot be downloaded. Unsafely' - ' update the Root metadata.') - self._update_metadata('root', DEFAULT_ROOT_UPPERLENGTH) - self.refresh(unsafely_update_root_if_necessary=False) - - else: - raise - - except tuf.ExpiredMetadataError: - if unsafely_update_root_if_necessary: - logger.info('No changes were detected from the mirrors for a given role' - ', and that metadata that is available on disk has been found to be' - ' expired. Trying to update root in case of foul play.') - self._update_metadata('root', DEFAULT_ROOT_UPPERLENGTH) - self.refresh(unsafely_update_root_if_necessary=False) + self._update_metadata('timestamp', DEFAULT_TIMESTAMP_UPPERLENGTH) + # TODO: After fetching snapshot.json, we should either verify the root + # fileinfo referenced there matches what was fetched earlier in + # _update_root_metadata() or make another attempt to download root.json. + self._update_metadata_if_changed('snapshot', + referenced_metadata='timestamp') + self._update_metadata_if_changed('targets') + - # The caller explicitly requested not to unsafely fetch an expired Root. - else: - logger.info('No changes were detected from the mirrors for a given role' - ', and that metadata that is available on disk has been found to be ' - 'expired. Your metadata is out of date.') - raise + def _update_root_metadata(self, current_root_metadata, compression_algorithm=None): + """ + <Purpose> + The root file must be signed by the current root threshold and keys as + well as the previous root threshold and keys.
The update process for root + files means that each intermediate root file must be downloaded, to build + a chain of trusted root keys from keys already trusted by the client: + + 1.root -> 2.root -> 3.root + + 3.root must be signed by the threshold and keys of 2.root, and 2.root + must be signed by the threshold and keys of 1.root. + + <Arguments> + current_root_metadata: + The currently held version of root. + + compression_algorithm: + The compression algorithm used to compress remote metadata. + + <Side Effects> + Updates the root metadata files with the latest information. + + <Returns> + None. + """ + # Retrieve the latest, remote root.json. + latest_root_metadata_file = \ + self._get_metadata_file('root', 'root.json', + tuf.conf.DEFAULT_ROOT_REQUIRED_LENGTH, None, + compression_algorithm=compression_algorithm) + latest_root_metadata = \ + tuf.util.load_json_string(latest_root_metadata_file.read().decode('utf-8')) + + + next_version = current_root_metadata['version'] + 1 + latest_version = latest_root_metadata['signed']['version'] + + # Update from the next version of root up to (and including) the latest + # version. For example: + # current = version 1 + # latest = version 3 + # update from 1.root.json to 3.root.json. + for version in range(next_version, latest_version + 1): + # Temporarily set consistent snapshot. Will be updated to whatever is set + # in the latest root.json after running through the intermediates with + # _update_metadata(). + self.consistent_snapshot = True + self._update_metadata('root', tuf.conf.DEFAULT_ROOT_REQUIRED_LENGTH, version=version, + compression_algorithm=compression_algorithm) @@ -775,7 +779,7 @@ def _check_hashes(self, file_object, trusted_hashes): if trusted_hash != computed_hash: raise tuf.BadHashError(trusted_hash, computed_hash) else: - logger.info('The file\'s '+algorithm+' hash is correct: '+trusted_hash) + logger.info('The file\'s ' + algorithm + ' hash is correct: ' + trusted_hash) @@ -994,6 +998,7 @@ def _verify_uncompressed_metadata_file(self, metadata_file_object, # metadata. # Verify the signature on the downloaded metadata object. + valid = tuf.sig.verify(metadata_signable, metadata_role, self.updater_name) if not valid: @@ -1175,7 +1180,7 @@ def _get_metadata_file(self, metadata_role, remote_filename, # 'timestamp.json', if available, is less than what was downloaded. # Otherwise, accept the new timestamp with version number # 'version_downloaded'. - logger.info('metadata_role: ' + repr(metadata_role)) + try: current_version = \ self.metadata['current'][metadata_role]['version'] @@ -1188,7 +1193,7 @@ def _get_metadata_file(self, metadata_role, remote_filename, logger.info(metadata_role + ' not available locally.') self._verify_uncompressed_metadata_file(file_object, metadata_role) - + except Exception as exception: # Remember the error from this mirror, and "reset" the target file.
logger.exception('Update failed from ' + file_mirror + '.') @@ -1205,6 +1210,22 @@ def _get_metadata_file(self, metadata_role, remote_filename, logger.error('Failed to update {0} from all mirrors: {1}'.format( remote_filename, file_mirror_errors)) raise tuf.NoWorkingMirrorError(file_mirror_errors) + + + + def _verify_root_chain_link(self, role, current, next): + if role != 'root': + return True + + current_role = current['roles'][role] + + # Verify next metadata with current keys/threshold + valid = tuf.sig.verify(next, role, self.updater_name, + current_role['threshold'], current_role['keyids']) + + if not valid: + raise tuf.BadSignatureError('Root is not signed by previous threshold' + ' of keys.') @@ -1474,12 +1495,11 @@ def _update_metadata(self, metadata_role, upperbound_filelength, version=None, remote_filename = metadata_filename filename_version = '' - if self.consistent_snapshot: + if self.consistent_snapshot and version: filename_version = version dirname, basename = os.path.split(remote_filename) remote_filename = os.path.join(dirname, str(filename_version) + '.' + basename) - logger.info('Verifying ' + repr(metadata_role) + '. Requesting version: ' + repr(version)) metadata_file_object = \ self._get_metadata_file(metadata_role, remote_filename, upperbound_filelength, version, @@ -1507,6 +1527,7 @@ def _update_metadata(self, metadata_role, upperbound_filelength, version=None, # 'metadata_file_object' is an instance of tuf.util.TempFile. metadata_signable = \ tuf.util.load_json_string(metadata_file_object.read().decode('utf-8')) + if compression_algorithm == 'gzip': current_uncompressed_filepath = \ os.path.join(self.metadata_directory['current'], @@ -1523,6 +1544,9 @@ def _update_metadata(self, metadata_role, upperbound_filelength, version=None, # stored for 'metadata_role'. updated_metadata_object = metadata_signable['signed'] current_metadata_object = self.metadata['current'].get(metadata_role) + + self._verify_root_chain_link(metadata_role, current_metadata_object, + metadata_signable) # Finally, update the metadata and fileinfo stores, and rebuild the # key and role info for the top-level roles if 'metadata_role' is root. @@ -1605,13 +1629,13 @@ def _update_metadata_via_fileinfo(self, metadata_role, uncompressed_fileinfo, if compression == 'gzip': metadata_filename = metadata_filename + '.gz' - # Attempt a file download from each mirror until the file is downloaded and - # verified. If the signature of the downloaded file is valid, proceed, - # otherwise log a warning and try the next mirror. 'metadata_file_object' - # is the file-like object returned by 'download.py'. 'metadata_signable' - # is the object extracted from 'metadata_file_object'. Metadata saved to - # files are regarded as 'signable' objects, conformant to - # 'tuf.formats.SIGNABLE_SCHEMA'. + # Attempt a file download from each mirror until the file is downloaded + # and verified. If the signature of the downloaded file is valid, + # proceed, otherwise log a warning and try the next mirror. + # 'metadata_file_object' is the file-like object returned by + # 'download.py'. 'metadata_signable' is the object extracted from + # 'metadata_file_object'. Metadata saved to files are regarded as + # 'signable' objects, conformant to 'tuf.formats.SIGNABLE_SCHEMA'. 
# # Some metadata (presently timestamp) will be downloaded "unsafely", in the # sense that we can only estimate its true length and know nothing about @@ -1648,7 +1672,7 @@ def _update_metadata_via_fileinfo(self, metadata_role, uncompressed_fileinfo, filename_digest = \ random.choice(list(uncompressed_fileinfo['hashes'].values())) dirname, basename = os.path.split(remote_filename) - remote_filename = os.path.join(dirname, filename_digesti + '.' + basename) + remote_filename = os.path.join(dirname, filename_digest + '.' + basename) metadata_file_object = \ self._safely_get_metadata_file(metadata_role, remote_filename, @@ -1787,46 +1811,30 @@ def _update_metadata_if_changed(self, metadata_role, logger.debug(repr(metadata_role) + ' referenced in ' + repr(referenced_metadata)+ '. ' + repr(metadata_role) + ' may be updated.') - - if metadata_role in ['root', 'snapshot']: - # Extract the fileinfo of the uncompressed version of 'metadata_role'. - expected_fileinfo = self.metadata['current'][referenced_metadata] \ - ['meta'] \ - [uncompressed_metadata_filename] - - # Simply return if the metadata for 'metadata_role' has not been updated, - # according to the uncompressed metadata provided by the referenced - # metadata. The metadata is considered updated if its fileinfo has - # changed. - if not self._fileinfo_has_changed(uncompressed_metadata_filename, - expected_fileinfo): - logger.info(repr(uncompressed_metadata_filename) + ' up-to-date.') - - # Since we have not downloaded a new version of this metadata, we - # should check to see if our local version is stale and notify the user - # if so. This raises tuf.ExpiredMetadataError if the metadata we - # have is expired. Resolves issue #322. - self._ensure_not_expired(self.metadata['current'][metadata_role], - metadata_role) - - return - - # The version number is inspected instead for all other roles. The - # metadata is considered updated if its version number is strictly greater - # than its currently trusted version number. - else: - expected_versioninfo = self.metadata['current'][referenced_metadata] \ - ['meta'] \ - [uncompressed_metadata_filename] + + # Simply return if the metadata for 'metadata_role' has not been updated, + # according to the uncompressed metadata provided by the referenced + # metadata. The metadata is considered updated if its version number is + # strictly greater than its currently trusted version number. + expected_versioninfo = self.metadata['current'][referenced_metadata] \ + ['meta'] \ + [uncompressed_metadata_filename] + + if not self._versioninfo_has_been_updated(uncompressed_metadata_filename, + expected_versioninfo): + logger.info(repr(uncompressed_metadata_filename) + ' up-to-date.') - if not self._versioninfo_has_been_updated(uncompressed_metadata_filename, - expected_versioninfo): - logger.info(repr(uncompressed_metadata_filename) + ' up-to-date.') - - self._ensure_not_expired(self.metadata['current'][metadata_role], - metadata_role) - - return + # Since we have not downloaded a new version of this metadata, we + # should check to see if our local version is stale and notify the user + # if so. This raises tuf.ExpiredMetadataError if the metadata we + # have is expired. Resolves issue #322. + self._ensure_not_expired(self.metadata['current'][metadata_role], + metadata_role) + # TODO: If 'metadata_role' is root or snapshot, we should verify that + # root's hash matches what's in snapshot, and that snapshot hash matches + # what's listed in timestamp.json. 
+ + return logger.debug('Metadata ' + repr(uncompressed_metadata_filename) + ' has changed.') @@ -1877,13 +1885,8 @@ def _update_metadata_if_changed(self, metadata_role, upperbound_filelength = tuf.conf.DEFAULT_TARGETS_REQUIRED_LENGTH try: - if metadata_role in ['root', 'snapshot']: - self._update_metadata_via_fileinfo(metadata_role, expected_fileinfo, compression) - - # Update all other metadata by way of version number. - else: - self._update_metadata(metadata_role, upperbound_filelength, - expected_versioninfo['version'], compression) + self._update_metadata(metadata_role, upperbound_filelength, + expected_versioninfo['version'], compression) except: # The current metadata we have is not current but we couldn't get new @@ -2498,7 +2501,7 @@ def _targets_of_role(self, rolename, targets=None, skip_refresh=False): return [] # Get the targets specified by the role itself. - for filepath, fileinfo in six.iteritems(self.metadata['current'][rolename]['targets']): + for filepath, fileinfo in six.iteritems(self.metadata['current'][rolename].get('targets', {})): new_target = {} new_target['filepath'] = filepath new_target['fileinfo'] = fileinfo @@ -2697,7 +2700,7 @@ def _preorder_depth_first_walk(self, target_filepath): for child_role in child_roles: child_role_name = self._visit_child_role(child_role, target_filepath, delegations) - if not child_role['backtrack'] and child_role_name is not None: + if child_role['terminating'] and child_role_name is not None: logger.debug('Adding child role ' + repr(child_role_name)) logger.debug('Not backtracking to other roles.') role_names = [] diff --git a/tuf/formats.py b/tuf/formats.py index 9c257bd902..8594a011ea 100755 --- a/tuf/formats.py +++ b/tuf/formats.py @@ -408,7 +408,7 @@ name = SCHEMA.Optional(ROLENAME_SCHEMA), keyids = KEYIDS_SCHEMA, threshold = THRESHOLD_SCHEMA, - backtrack = SCHEMA.Optional(BOOLEAN_SCHEMA), + terminating = SCHEMA.Optional(BOOLEAN_SCHEMA), paths = SCHEMA.Optional(RELPATHS_SCHEMA), path_hash_prefixes = SCHEMA.Optional(PATH_HASH_PREFIXES_SCHEMA)) @@ -459,9 +459,11 @@ # tuf.roledb ROLEDB_SCHEMA = SCHEMA.Object( object_name = 'ROLEDB_SCHEMA', - keyids = KEYIDS_SCHEMA, + keyids = SCHEMA.Optional(KEYIDS_SCHEMA), signing_keyids = SCHEMA.Optional(KEYIDS_SCHEMA), - threshold = THRESHOLD_SCHEMA, + previous_keyids = SCHEMA.Optional(KEYIDS_SCHEMA), + threshold = SCHEMA.Optional(THRESHOLD_SCHEMA), + previous_threshold = SCHEMA.Optional(THRESHOLD_SCHEMA), version = SCHEMA.Optional(METADATAVERSION_SCHEMA), expires = SCHEMA.Optional(ISO8601_DATETIME_SCHEMA), signatures = SCHEMA.Optional(SIGNATURES_SCHEMA), diff --git a/tuf/keys.py b/tuf/keys.py index 3582f665e2..5407a8b1c0 100755 --- a/tuf/keys.py +++ b/tuf/keys.py @@ -1128,6 +1128,10 @@ def extract_pem(pem, private_pem=False): pem: A string in PEM format. + private_pem: + Boolean indicating whether 'pem' contains a private key. 'pem' is + expected to begin and end with a private header and footer. + tuf.FormatError, if 'pem' is improperly formatted. diff --git a/tuf/pyca_crypto_keys.py b/tuf/pyca_crypto_keys.py index 89af89c728..35c76f8176 100755 --- a/tuf/pyca_crypto_keys.py +++ b/tuf/pyca_crypto_keys.py @@ -429,7 +429,7 @@ def verify_rsa_signature(signature, signature_method, public_key, data): # Verify the expected 'signature_method' value. if signature_method != 'RSASSA-PSS': raise tuf.UnknownMethodError(signature_method) - + # Verify the RSASSA-PSS signature with pyca/cryptography.
try: public_key_object = serialization.load_pem_public_key(public_key.encode('utf-8'), @@ -454,12 +454,9 @@ def verify_rsa_signature(signature, signature_method, public_key, data): return False # Raised by load_pem_public_key(). - except ValueError: - raise tuf.CryptoError('The PEM could not be decoded successfully.') - - # Raised by load_pem_public_key(). - except cryptography.exceptions.UnsupportedAlgorithm: - raise tuf.CryptoError('The private key type is not supported.') + except (ValueError, cryptography.exceptions.UnsupportedAlgorithm) as e: + raise tuf.CryptoError('The PEM could not be decoded successfully,' + ' or contained an unsupported key type: ' + str(e)) @@ -468,17 +465,10 @@ def verify_rsa_signature(signature, signature_method, public_key, data): """ - Return a string in PEM format, where the private part of the RSA key is - encrypted. The private part of the RSA key is encrypted by the Triple - Data Encryption Algorithm (3DES) and Cipher-block chaining (CBC) for the - mode of operation. Password-Based Key Derivation Function 1 (PBKDF1) + MD5 - is used to strengthen 'passphrase'. - - TODO: Generate encrypted PEM (that matches PyCrypto's) once support is - added to pyca/cryptography. - - https://en.wikipedia.org/wiki/Triple_DES - https://en.wikipedia.org/wiki/PBKDF2 + Return a string in PEM format, where the private portion of the RSA key is + encrypted. The format of the encrypted PEM is PKCS8, while the encryption + algorithm used varies. pyca/cryptography will try to use the best + available encryption algorithm in this case. >>> public, private = generate_rsa_public_and_private(2048) >>> passphrase = 'secret' @@ -498,13 +488,14 @@ def create_rsa_encrypted_pem(private_key, passphrase): tuf.FormatError, if the arguments are improperly formatted. - tuf.CryptoError, if an RSA key in encrypted PEM format cannot be created. + tuf.CryptoError, if 'private_key' (private PEM format) cannot be + deserialized. TypeError, if 'private_key' is unset. - PyCrypto's Crypto.PublicKey.RSA.exportKey() called to perform the actual - generation of the PEM-formatted output. + pyca/cryptography's key serialization functions are called to generate the + PEM-formatted output. A string in PEM format, where the private RSA key is encrypted. @@ -521,35 +512,35 @@ ... tuf.formats.PASSWORD_SCHEMA.check_match(passphrase) # 'private_key' is in PEM format and unencrypted. The extracted key will be - # imported and converted to PyCrypto's RSA key object - # (i.e., Crypto.PublicKey.RSA). Use PyCrypto's exportKey method, with a - # passphrase specified, to create the string. PyCrypto uses PBKDF1+MD5 to - # strengthen 'passphrase', and 3DES with CBC mode for encryption. + # imported and converted to PyCA's RSA key object. Use PyCA's + # private_bytes() method, with a passphrase specified, to create the expected + # format of the private key. In contrast, pycrypto_keys.py uses PBKDF1+MD5 + # to strengthen 'passphrase', and 3DES with CBC mode for encryption. # 'private_key' may still be a NULL string after the # 'tuf.formats.PEMRSA_SCHEMA' (i.e., 'private_key' has variable size and can # be an empty string.) - # TODO: Use PyCrypto to generate the encrypted PEM string. Generating - # encrypted PEMs appears currently unsupported by pyca/cryptography.
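For reference, the serialization call that replaces PyCrypto's exportKey() above can be exercised on its own. A minimal, self-contained sketch, assuming only that pyca/cryptography is installed; the key size and passphrase below are illustrative, not values used by TUF:

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import rsa

    # Generate a throwaway RSA key and serialize it as an encrypted PKCS8 PEM,
    # letting pyca/cryptography pick the best available encryption algorithm.
    private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
                                           backend=default_backend())
    encrypted_pem = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.BestAvailableEncryption(b'secret'))

    # The result is an encrypted private key block.
    assert encrypted_pem.decode().startswith('-----BEGIN ENCRYPTED PRIVATE KEY-----')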
+ if len(private_key): - try: - rsa_key_object = Crypto.PublicKey.RSA.importKey(private_key) - encrypted_pem = rsa_key_object.exportKey(format='PEM', - passphrase=passphrase) - - except (ValueError, IndexError, TypeError) as e: - raise tuf.CryptoError('An encrypted RSA key in PEM format cannot be' - ' generated: ' + str(e)) + try: + private_key = load_pem_private_key(private_key.encode('utf-8'), + password=None, + backend=default_backend()) + except ValueError: + raise tuf.CryptoError('The private key (in PEM format) could not be' + ' deserialized.') else: raise TypeError('The required private key is unset.') - + + encrypted_pem = \ + private_key.private_bytes(encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.PKCS8, + encryption_algorithm=serialization.BestAvailableEncryption(passphrase.encode('utf-8'))) return encrypted_pem.decode() - - def create_rsa_public_and_private_from_encrypted_pem(encrypted_pem, passphrase): """ @@ -738,7 +729,7 @@ def encrypt_key(key_object, password): tuf.formats.PASSWORD_SCHEMA.check_match(password) # Ensure the private portion of the key is included in 'key_object'. - if not key_object['keyval']['private']: + if 'private' not in key_object['keyval'] or not key_object['keyval']['private']: raise tuf.FormatError('Key object does not contain a private part.') # Derive a key (i.e., an appropriate encryption key and not the diff --git a/tuf/repository_lib.py b/tuf/repository_lib.py index ae3fa760f0..3e63eb2ba9 100755 --- a/tuf/repository_lib.py +++ b/tuf/repository_lib.py @@ -8,7 +8,7 @@ Vladimir Diaz - June 1, 2014 + June 1, 2014. See LICENSE for licensing information. @@ -101,19 +101,20 @@ # The full list of supported TUF metadata extensions. METADATA_EXTENSIONS = ['.json.gz', '.json'] +# The supported extensions of roles listed in Snapshot metadata. +SNAPSHOT_ROLE_EXTENSIONS = ['.json'] -def _generate_and_write_metadata(rolename, metadata_filename, write_partial, + +def _generate_and_write_metadata(rolename, metadata_filename, targets_directory, metadata_directory, consistent_snapshot=False, filenames=None, - compression_algorithms=['gz']): + compression_algorithms=['gz'], + allow_partially_signed=False, + increment_version_number=True): """ - Non-public function that can generate and write the metadata of the specified - top-level 'rolename'. It also increments version numbers if: - - 1. write_partial==True and the metadata is the first to be written. - - 2. write_partial=False (i.e., write()), the metadata was not loaded as - partially written, and a write_partial is not needed. + Non-public function that can generate and write the metadata for the + specified 'rolename'. It also increments the version number of 'rolename' if + the 'increment_version_number' argument is True. """ metadata = None @@ -121,11 +122,15 @@ def _generate_and_write_metadata(rolename, metadata_filename, write_partial, # Retrieve the roleinfo of 'rolename' to extract the needed metadata # attributes, such as version number, expiration, etc. roleinfo = tuf.roledb.get_roleinfo(rolename) - + previous_keyids = roleinfo.get('previous_keyids', []) + previous_threshold = roleinfo.get('previous_threshold', 1) + signing_keyids = list(set(roleinfo['signing_keyids'] + previous_keyids)) + # Generate the appropriate role metadata for 'rolename'. 
- if rolename == 'root': + if rolename == 'root': metadata = generate_root_metadata(roleinfo['version'], - roleinfo['expires'], consistent_snapshot, + roleinfo['expires'], + consistent_snapshot, compression_algorithms) _log_warning_if_expires_soon(ROOT_FILENAME, roleinfo['expires'], @@ -169,59 +174,91 @@ roleinfo['expires'], roleinfo['delegations'], consistent_snapshot) + + if rolename in ['root', 'targets', 'snapshot', 'timestamp'] and not allow_partially_signed: + # Before writing 'rolename' to disk, increment its version number and + # verify that it is fully signed. Only delegated roles may be written + # to disk without full verification of their signatures, since their + # validity ultimately depends on the role that delegates to them. + roleinfo = tuf.roledb.get_roleinfo(rolename) + current_version = metadata['version'] + metadata['version'] = metadata['version'] + 1 + roleinfo['version'] = roleinfo['version'] + 1 + + tuf.roledb.update_roleinfo(rolename, roleinfo) + signable = sign_metadata(metadata, signing_keyids, metadata_filename) + + + def should_write(): + # Root must be signed by its previous keys and threshold. + if rolename == 'root' and len(previous_keyids) > 0: + if not tuf.sig.verify(signable, rolename, 'default', previous_threshold, + previous_keyids): + return False + + else: + logger.debug('Root is signed by a threshold of its previous keyids.') + + # In the normal case, we should write metadata if the threshold is met. + return tuf.sig.verify(signable, rolename, 'default', roleinfo['threshold'], + roleinfo['signing_keyids']) + + + if should_write(): + _remove_invalid_and_duplicate_signatures(signable) + filename = write_metadata_file(signable, metadata_filename, + metadata['version'], compression_algorithms, + consistent_snapshot) + + # The root and timestamp files should also be written without a version + # number prepended if 'consistent_snapshot' is True. Clients may request + # a timestamp and root file without knowing their version numbers. + if rolename == 'root' or rolename == 'timestamp': + write_metadata_file(signable, metadata_filename, metadata['version'], + compression_algorithms, consistent_snapshot=False) + + # The root role should always be accessible by version number, so that + # clients can walk through root history to update keys. + if rolename == 'root': + write_metadata_file(signable, metadata_filename, metadata['version'], + compression_algorithms, consistent_snapshot=True) - signable = sign_metadata(metadata, roleinfo['signing_keyids'], - metadata_filename) - - # Check if the version number of 'rolename' may be automatically incremented, - # depending on whether if partial metadata is loaded or if the metadata is - # written with write() / write_partial(). - # Increment the version number if this is the first partial write. - if write_partial: - temp_signable = sign_metadata(metadata, [], metadata_filename) - temp_signable['signatures'].extend(roleinfo['signatures']) - status = tuf.sig.get_signature_status(temp_signable, rolename) - if len(status['good_sigs']) == 0: - metadata['version'] = metadata['version'] + 1 + # 'signable' contains an invalid threshold of signatures. + else: + # Since new metadata cannot be successfully written, reset the version + # number.
roleinfo = tuf.roledb.get_roleinfo(rolename) - roleinfo['version'] = roleinfo['version'] + 1 + roleinfo['version'] = current_version tuf.roledb.update_roleinfo(rolename, roleinfo) - signable = sign_metadata(metadata, roleinfo['signing_keyids'], - metadata_filename) - # non-partial write() + + message = 'Not enough signatures for ' + repr(metadata_filename) + raise tuf.UnsignedMetadataError(message, signable) + + # 'rolename' is a delegated role or a top-level role that is partially + # signed, and thus its signatures shouldn't be verified. else: # If writing a new version of 'rolename,' increment its version number in # both the metadata file and roledb (required so that snapshot references # the latest version). - if tuf.sig.verify(signable, rolename) and not roleinfo['partial_loaded']: + roleinfo = tuf.roledb.get_roleinfo(rolename) + if increment_version_number: metadata['version'] = metadata['version'] + 1 - roleinfo = tuf.roledb.get_roleinfo(rolename) roleinfo['version'] = roleinfo['version'] + 1 - tuf.roledb.update_roleinfo(rolename, roleinfo) - signable = sign_metadata(metadata, roleinfo['signing_keyids'], - metadata_filename) - - # Write the metadata to file if it contains a threshold of signatures. - signable['signatures'].extend(roleinfo['signatures']) - - if tuf.sig.verify(signable, rolename) or write_partial: + + tuf.roledb.update_roleinfo(rolename, roleinfo) + signable = sign_metadata(metadata, signing_keyids, metadata_filename) _remove_invalid_and_duplicate_signatures(signable) - filename = write_metadata_file(signable, metadata_filename, - metadata['version'], compression_algorithms, - consistent_snapshot) - - # The root and timestamp files should also be written without a version - # number prepended if 'consistent_snaptshot' is True. Clients may request - # a timestamp and root file without knowing their version numbers. - if rolename == 'root' or rolename == 'timestamp': - write_metadata_file(signable, metadata_filename, metadata['version'], - compression_algorithms, consistent_snapshot=False) - - # 'signable' contains an invalid threshold of signatures. - else: - message = 'Not enough signatures for ' + repr(metadata_filename) - raise tuf.UnsignedMetadataError(message, signable) - + + if rolename == 'root': + filename = write_metadata_file(signable, metadata_filename, + metadata['version'], compression_algorithms, + consistent_snapshot=True) + + else: + filename = write_metadata_file(signable, metadata_filename, + metadata['version'], compression_algorithms, + consistent_snapshot) + return signable, filename @@ -230,7 +267,7 @@ def _generate_and_write_metadata(rolename, metadata_filename, write_partial, def _prompt(message, result_type=str): """ - Non-public function that prompts the user for input by loging 'message', + Non-public function that prompts the user for input by logging 'message', converting the input to 'result_type', and returning the value to the caller. """ @@ -395,6 +432,7 @@ def _remove_invalid_and_duplicate_signatures(signable): # Remove 'signature' from 'signable' if it is an invalid signature. if not tuf.keys.verify_signature(key, signature, signed): + logger.debug('Removing invalid signature for ' + repr(keyid)) signable['signatures'].remove(signature) # Although valid, it may still need removal if it is a duplicate. Check @@ -434,20 +472,25 @@ def _delete_obsolete_metadata(metadata_directory, snapshot_metadata, # 'files' here is a list of target file names. 
for basename in files: + + # Don't delete previous root metadata files; clients need them to walk + # the root history. + if basename.endswith('root.json'): + continue + metadata_path = os.path.join(directory_path, basename) # Strip the metadata dirname and the leading path separator. # '{repository_directory}/metadata/django.json' --> # 'django.json' metadata_name = \ metadata_path[len(metadata_directory):].lstrip(os.path.sep) - + # Strip the version number if 'consistent_snapshot' is True. Example: # '10.django.json' --> 'django.json'. Consistent and non-consistent # metadata might co-exist if write() and # write(consistent_snapshot=True) are mixed, so ensure only # '<version_number>.filename' metadata is stripped. embedded_version_number = None - + # Should we check if 'consistent_snapshot' is True? It might have been # set previously, but 'consistent_snapshot' can potentially be False # now. We'll proceed with the understanding that 'metadata_name' can @@ -459,12 +502,14 @@ def _delete_obsolete_metadata(metadata_directory, snapshot_metadata, else: logger.debug(repr(metadata_name) + ' found in the snapshot role.') + + # Strip filename extensions. The role database does not include the # metadata extension. metadata_name_extension = metadata_name - for metadata_extension in METADATA_EXTENSIONS: + for metadata_extension in METADATA_EXTENSIONS: #pragma: no branch if metadata_name.endswith(metadata_extension): metadata_name = metadata_name[:-len(metadata_extension)] break @@ -472,7 +517,7 @@ def _delete_obsolete_metadata(metadata_directory, snapshot_metadata, else: logger.debug(repr(metadata_name) + ' does not match' ' supported extension ' + repr(metadata_extension)) - + if metadata_name in ['root', 'targets', 'snapshot', 'timestamp']: return
+ If 'write_consistent_targets' is True, hard links are created for + the targets in 'target_files'. For example, if 'some_file.txt' is one + of the targets of 'target_files', consistent targets + <digest>.some_file.txt are created, one for each hash algorithm in use. A targets metadata object, conformant to 'tuf.formats.TARGETS_SCHEMA'. @@ -1600,7 +1640,6 @@ def generate_targets_metadata(targets_directory, target_files, version, raise tuf.Error(repr(target_path) + ' cannot be read.' ' Unable to generate targets metadata.') - # Add 'custom' if it has been provided. Custom data about the target is # optional and will only be included in metadata (i.e., a 'custom' field in # the target's fileinfo dictionary) if specified here. @@ -1726,7 +1765,7 @@ def generate_snapshot_metadata(metadata_directory, version, expiration_date, _strip_version_number(metadata_filename, consistent_snapshot) # All delegated roles are added to the snapshot file. - for metadata_extension in METADATA_EXTENSIONS: + for metadata_extension in SNAPSHOT_ROLE_EXTENSIONS: if metadata_filename.endswith(metadata_extension): rolename = metadata_filename[:-len(metadata_extension)] @@ -1869,8 +1908,12 @@ def sign_metadata(metadata_object, keyids, filename): if key['keytype'] in SUPPORTED_KEY_TYPES: if 'private' in key['keyval']: signed = signable['signed'] - signature = tuf.keys.create_signature(key, signed) - signable['signatures'].append(signature) + try: + signature = tuf.keys.create_signature(key, signed) + signable['signatures'].append(signature) + + except Exception: + logger.warning('Unable to create signature for keyid: ' + repr(keyid)) else: logger.warning('Private key unset. Skipping: ' + repr(keyid)) @@ -1974,23 +2017,35 @@ def write_metadata_file(metadata, filename, version_number, # and indentation is used. The 'tuf.util.TempFile' file-like object is # automatically closed after the final move. file_object.write(file_content) - logger.debug('Saving ' + repr(written_filename)) - file_object.move(written_filename) - if consistent_snapshot: dirname, basename = os.path.split(written_filename) - basename = basename.split(METADATA_EXTENSION, 1)[0] version_and_filename = str(version_number) + '.' + basename + METADATA_EXTENSION written_consistent_filename = os.path.join(dirname, version_and_filename) - logger.info('Linking ' + repr(written_consistent_filename)) - os.link(written_filename, written_consistent_filename) - + # If we were to point consistent snapshots to 'written_filename', they + # would always point to the current version. Example: 1.root.json and + # 2.root.json -> root.json. If consistent snapshot is True, we should save + # the consistent snapshot and point 'written_filename' to it. logger.info('Creating a consistent snapshot for ' + repr(written_filename)) + logger.debug('Saving ' + repr(written_consistent_filename)) + file_object.move(written_consistent_filename) + + # TODO: We should provide the option of either (1) creating a link via + # os.link() to the consistent snapshot or (2) creating a copy of the + # consistent snapshot and saving to its expected filename (e.g., + # root.json). The option should be configurable in tuf.conf.py. + # For now, we create a copy of the consistent snapshot and save it to + # 'written_filename'.
+ logger.info('Pointing ' + repr(filename) + ' to the consistent snapshot.') + shutil.copyfile(written_consistent_filename, written_filename) + else: - logger.info('Not linking a consistent filename for: ' + repr(written_filename)) - + logger.info('Not creating a consistent snapshot for ' + repr(written_filename)) + logger.debug('Saving ' + repr(written_filename)) + file_object.move(written_filename) + # Generate the compressed versions of 'metadata', if necessary. A compressed # file may be written (without needing to write the uncompressed version) if # the repository maintainer adds compression after writing the uncompressed @@ -2135,7 +2190,7 @@ def _log_status_of_top_level_roles(targets_directory, metadata_directory): # Verify the metadata of the Root role. try: signable, root_filename = \ - _generate_and_write_metadata('root', root_filename, False, + _generate_and_write_metadata('root', root_filename, targets_directory, metadata_directory) _log_status('root', signable) @@ -2148,7 +2203,7 @@ def _log_status_of_top_level_roles(targets_directory, metadata_directory): # Verify the metadata of the Targets role. try: signable, targets_filename = \ - _generate_and_write_metadata('targets', targets_filename, False, + _generate_and_write_metadata('targets', targets_filename, targets_directory, metadata_directory) _log_status('targets', signable) @@ -2160,7 +2215,7 @@ def _log_status_of_top_level_roles(targets_directory, metadata_directory): filenames = {'root': root_filename, 'targets': targets_filename} try: signable, snapshot_filename = \ - _generate_and_write_metadata('snapshot', snapshot_filename, False, + _generate_and_write_metadata('snapshot', snapshot_filename, targets_directory, metadata_directory, False, filenames) _log_status('snapshot', signable) @@ -2173,7 +2228,7 @@ def _log_status_of_top_level_roles(targets_directory, metadata_directory): filenames = {'snapshot': snapshot_filename} try: signable, timestamp_filename = \ - _generate_and_write_metadata('timestamp', timestamp_filename, False, + _generate_and_write_metadata('timestamp', timestamp_filename, targets_directory, metadata_directory, False, filenames) _log_status('timestamp', signable) @@ -2193,9 +2248,9 @@ def _log_status(rolename, signable): status = tuf.sig.get_signature_status(signable, rolename) - message = repr(rolename) + ' role contains ' + repr(len(status['good_sigs']))+\ - ' / ' + repr(status['threshold']) + ' signatures.' - logger.info(message) + logger.info(repr(rolename) + ' role contains ' + \ + repr(len(status['good_sigs'])) + ' / ' + repr(status['threshold']) + \ + ' signatures.') diff --git a/tuf/repository_tool.py b/tuf/repository_tool.py index a1d1682a1e..6f8bb1b135 100755 --- a/tuf/repository_tool.py +++ b/tuf/repository_tool.py @@ -178,23 +178,15 @@ def __init__(self, repository_directory, metadata_directory, targets_directory): - def write(self, write_partial=False, consistent_snapshot=False, - compression_algorithms=['gz']): + def writeall(self, consistent_snapshot=False, compression_algorithms=['gz']): """ Write all the JSON Metadata objects to their corresponding files. - write() raises an exception if any of the role metadata to be written to - disk is invalid, such as an insufficient threshold of signatures, missing - private keys, etc. + writeall() raises an exception if any of the role metadata to be written + to disk is invalid, such as an insufficient threshold of signatures, + missing private keys, etc. 
- write_partial: - A boolean indicating whether partial metadata should be written to - disk. Partial metadata may be written to allow multiple maintainters - to independently sign and update role metadata. write() raises an - exception if a metadata role cannot be written due to not having enough - signatures. - consistent_snapshot: A boolean indicating whether written metadata and target files should include a version number in the filename (i.e., @@ -218,22 +210,20 @@ def write(self, write_partial=False, consistent_snapshot=False, None. """ - # Does 'write_partial' have the correct format? + # Do the arguments have the correct format? # Ensure the arguments have the appropriate number of objects and object # types, and that all dict keys are properly named. # Raise 'tuf.FormatError' if any are improperly formatted. - tuf.formats.BOOLEAN_SCHEMA.check_match(write_partial) tuf.formats.BOOLEAN_SCHEMA.check_match(consistent_snapshot) tuf.formats.COMPRESSIONS_SCHEMA.check_match(compression_algorithms) - # At this point the tuf.keydb and tuf.roledb stores must be fully - # populated, otherwise write() throws a 'tuf.UnsignedMetadataError' - # exception if any of the top-level roles are missing signatures, keys, etc. + # At this point, tuf.keydb and tuf.roledb must be fully populated, + # otherwise writeall() throws a 'tuf.UnsignedMetadataError' exception if + # any of the top-level roles are missing signatures, keys, etc. # Write the metadata files of all the Targets roles that are dirty (i.e., # have been modified via roledb.update_roleinfo()). - dirty_roles = tuf.roledb.get_dirty_roles() - filenames = {'root': os.path.join(self._metadata_directory, repo_lib.ROOT_FILENAME), 'targets': os.path.join(self._metadata_directory, repo_lib.TARGETS_FILENAME), 'snapshot': os.path.join(self._metadata_directory, repo_lib.SNAPSHOT_FILENAME), @@ -252,20 +242,17 @@ def write(self, write_partial=False, consistent_snapshot=False, dirty_rolename + METADATA_EXTENSION) repo_lib._generate_and_write_metadata(dirty_rolename, dirty_filename, - write_partial, self._targets_directory, self._metadata_directory, consistent_snapshot, filenames) - # Metadata should be written in (delegated targets -> root -> - # targets -> snapshot -> timestamp) order. - # Begin by generating the 'root.json' metadata file. - # _generate_and_write_metadata() raises a 'tuf.Error' exception if the - # metadata cannot be written. + # Metadata should be written in (delegated targets -> root -> targets -> + # snapshot -> timestamp) order. Begin by generating the 'root.json' + # metadata file. _generate_and_write_metadata() raises a 'tuf.Error' + # exception if the metadata cannot be written. if 'root' in dirty_rolenames or consistent_snapshot: repo_lib._generate_and_write_metadata('root', filenames['root'], - write_partial, self._targets_directory, self._metadata_directory, consistent_snapshot, @@ -274,7 +261,6 @@ def write(self, write_partial=False, consistent_snapshot=False, # Generate the 'targets.json' metadata file.
if 'targets' in dirty_rolenames: repo_lib._generate_and_write_metadata('targets', filenames['targets'], - write_partial, self._targets_directory, self._metadata_directory, consistent_snapshot) @@ -283,7 +269,6 @@ def write(self, write_partial=False, consistent_snapshot=False, if 'snapshot' in dirty_rolenames: snapshot_signable, junk = \ repo_lib._generate_and_write_metadata('snapshot', filenames['snapshot'], - write_partial, self._targets_directory, self._metadata_directory, consistent_snapshot, filenames) @@ -291,7 +276,6 @@ def write(self, write_partial=False, consistent_snapshot=False, # Generate the 'timestamp.json' metadata file. if 'timestamp' in dirty_rolenames: repo_lib._generate_and_write_metadata('timestamp', filenames['timestamp'], - write_partial, self._targets_directory, self._metadata_directory, consistent_snapshot, filenames) @@ -307,14 +291,27 @@ - def write_partial(self): + def write(self, rolename, consistent_snapshot=False, increment_version_number=True): """ - Write all the JSON metadata to their corresponding files, but allow - metadata files to contain an invalid threshold of signatures. + Write the JSON metadata for 'rolename' to its corresponding file on disk. + Unlike writeall(), write() allows the metadata file to contain an invalid + threshold of signatures. - None. + rolename: + The name of the role to be written to disk. + + consistent_snapshot: + A boolean indicating whether written metadata and target files should + include a version number in the filename (i.e., + <version_number>.root.json, <version_number>.targets.json.gz, + <version_number>.README.json). + Example: 13.root.json + + increment_version_number: + Boolean indicating whether the version number of 'rolename' should be + automatically incremented. None. @@ -326,17 +323,34 @@ def write_partial(self): None. """ - self.write(write_partial=True) + rolename_filename = os.path.join(self._metadata_directory, + rolename + METADATA_EXTENSION) + + filenames = {'root': os.path.join(self._metadata_directory, repo_lib.ROOT_FILENAME), + 'targets': os.path.join(self._metadata_directory, repo_lib.TARGETS_FILENAME), + 'snapshot': os.path.join(self._metadata_directory, repo_lib.SNAPSHOT_FILENAME), + 'timestamp': os.path.join(self._metadata_directory, repo_lib.TIMESTAMP_FILENAME)} + + repo_lib._generate_and_write_metadata(rolename, + rolename_filename, + self._targets_directory, + self._metadata_directory, + consistent_snapshot, + filenames=filenames, + allow_partially_signed=True, + increment_version_number=increment_version_number) + + def status(self): """ Determine the status of the top-level roles, including those delegated by the Targets role. status() checks if each role provides sufficient public and private keys, signatures, and that a valid metadata file is generated if writeall() were to be called. Metadata files are temporarily written so that file hashes and lengths may be verified, delegated role trust may be checked, and target paths validated against parent roles. status() does not do a simple check for number of threshold keys and @@ -396,7 +410,7 @@ def status(self): continue try: - repo_lib._generate_and_write_metadata(delegated_role, filename, False, + repo_lib._generate_and_write_metadata(delegated_role, filename, targets_directory, metadata_directory) except tuf.UnsignedMetadataError: @@ -432,7 +446,7 @@ def dirty_roles(self): roles printed/logged here.
Unlike status(), signatures, public keys, targets, etc. are not verified. status() should be called instead if the caller would like to verify if a valid role file is generated - if write() were to be called. + if writeall() were to be called. None. @@ -667,10 +681,13 @@ def add_verification_key(self, key, expires=None): keyid = key['keyid'] roleinfo = tuf.roledb.get_roleinfo(self.rolename) + + previous_keyids = roleinfo['keyids'] # Add 'key' to the role's entry in 'tuf.roledb.py' and avoid duplicates. - if keyid not in roleinfo['keyids']: + if keyid not in previous_keyids: roleinfo['keyids'].append(keyid) + roleinfo['previous_keyids'] = previous_keyids tuf.roledb.update_roleinfo(self._rolename, roleinfo) @@ -739,8 +756,8 @@ def load_signing_key(self, key): key: The role's key, conformant to 'tuf.formats.ANYKEY_SCHEMA'. It must contain the private key, so that role signatures may be generated when - write() or write_partial() is eventually called to generate valid - metadata files. + writeall() or write() is eventually called to generate valid metadata + files. tuf.FormatError, if 'key' is improperly formatted. @@ -886,6 +903,9 @@ def add_signature(self, signature, mark_role_as_dirty=True): roleinfo['signatures'].append(signature) tuf.roledb.update_roleinfo(self.rolename, roleinfo, mark_role_as_dirty) + else: + logger.debug('Signature already exists for role: ' + repr(self.rolename)) + def remove_signature(self, signature): @@ -1152,6 +1172,7 @@ def threshold(self, threshold): tuf.formats.THRESHOLD_SCHEMA.check_match(threshold) roleinfo = tuf.roledb.get_roleinfo(self._rolename) + roleinfo['previous_threshold'] = roleinfo['threshold'] roleinfo['threshold'] = threshold tuf.roledb.update_roleinfo(self._rolename, roleinfo) @@ -2087,7 +2108,7 @@ def get_delegated_rolenames(self): def delegate(self, rolename, public_keys, list_of_targets, threshold=1, - backtrack=True, restricted_paths=None, path_hash_prefixes=None): + terminating=False, restricted_paths=None, path_hash_prefixes=None): """ Create a new delegation, where 'rolename' is a child delegation of this @@ -2118,13 +2139,13 @@ def delegate(self, rolename, public_keys, list_of_targets, threshold=1, threshold: The threshold number of keys of 'rolename'. - backtrack: + terminating: Boolean that indicates whether this role allows the updater client to continue searching for targets (target files it is trusted to list - but has not yet specified) in other delegations. If 'backtrack' is - False and 'updater.target()' does not find 'example_target.tar.gz' in + but has not yet specified) in other delegations. If 'terminating' is + True and 'updater.target()' does not find 'example_target.tar.gz' in this role, a 'tuf.UnknownTargetError' exception should be raised. If - 'backtrack' is True (default), and 'target/other_role' is also trusted + 'terminating' is False (default), and 'target/other_role' is also trusted with 'example_target.tar.gz' and has listed it, updater.target() should backtrack and return the target file specified by 'target/other_role'.
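To make the backtrack -> terminating rename concrete, here is a minimal sketch of a delegation call under the new keyword. The repository path, keystore path, and role name are illustrative only and are not taken from this patch; the calls themselves (load_repository(), import_ed25519_publickey_from_file(), and Targets.delegate()) are the tuf.repository_tool API used elsewhere in this change.

  from tuf.repository_tool import load_repository, \
    import_ed25519_publickey_from_file

  repository = load_repository('repository/')

  # Hypothetical public key for the new 'packages' role.
  packages_pubkey = \
    import_ed25519_publickey_from_file('keystore/packages_key.pub')

  # Previously: backtrack=False stopped the updater's target search at this
  # role. The renamed keyword expresses the same behavior positively.
  repository.targets.delegate('packages', [packages_pubkey], [],
                              terminating=True)

A terminating delegation is useful when a role must be the sole authority for the targets delegated to it: if 'packages' does not list a requested target, updater.target() raises tuf.UnknownTargetError instead of falling through to later delegations.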
@@ -2161,7 +2182,7 @@ def delegate(self, rolename, public_keys, list_of_targets, threshold=1, tuf.formats.ANYKEYLIST_SCHEMA.check_match(public_keys) tuf.formats.RELPATHS_SCHEMA.check_match(list_of_targets) tuf.formats.THRESHOLD_SCHEMA.check_match(threshold) - tuf.formats.BOOLEAN_SCHEMA.check_match(backtrack) + tuf.formats.BOOLEAN_SCHEMA.check_match(terminating) if restricted_paths is not None: tuf.formats.RELPATHS_SCHEMA.check_match(restricted_paths) @@ -2239,7 +2260,7 @@ def delegate(self, rolename, public_keys, list_of_targets, threshold=1, roleinfo = {'name': rolename, 'keyids': roleinfo['keyids'], 'threshold': roleinfo['threshold'], - 'backtrack': backtrack, + 'terminating': terminating, 'paths': list(roleinfo['paths'].keys())} if restricted_paths is not None: @@ -2281,7 +2302,7 @@ def revoke(self, rolename): 'tuf.roledb'. Actual metadata files are not updated, only when repository.write() or - repository.write_partial() is called. + repository.writeall() is called. >>> >>> @@ -2829,6 +2850,9 @@ def load_repository(repository_directory): tuf.formats.PATH_SCHEMA.check_match(repository_directory) # Load top-level metadata. + #tuf.roledb.clear_roledb(clear_all=True) + #tuf.keydb.clear_keydb(clear_all=True) + repository_directory = os.path.abspath(repository_directory) metadata_directory = os.path.join(repository_directory, METADATA_STAGED_DIRECTORY_NAME) @@ -2864,9 +2888,12 @@ def load_repository(repository_directory): metadata_path = os.path.join(metadata_directory, metadata_role) metadata_name = \ metadata_path[len(metadata_directory):].lstrip(os.path.sep) - - # Strip the version number if 'consistent_snapshot' is True. + + # Strip the version number if 'consistent_snapshot' is True, + # or if 'metadata_role' is Root. # Example: '10.django.json' --> 'django.json' + consistent_snapshot = \ + metadata_role.endswith('root.json') or consistent_snapshot metadata_name, version_number_junk = \ repo_lib._strip_version_number(metadata_name, consistent_snapshot) @@ -2901,7 +2928,14 @@ def load_repository(repository_directory): # Extract the metadata attributes of 'metadata_name' and update its # corresponding roleinfo. - roleinfo = tuf.roledb.get_roleinfo(metadata_name) + roleinfo = {'name': metadata_name, + 'signing_keyids': [], + 'signatures': [], + 'partial_loaded': False, + 'compressions': [], + 'paths': {}, + } + roleinfo['signatures'].extend(signable['signatures']) roleinfo['version'] = metadata_object['version'] roleinfo['expires'] = metadata_object['expires'] @@ -2912,13 +2946,8 @@ def load_repository(repository_directory): if os.path.exists(metadata_path + '.gz'): roleinfo['compressions'].append('gz') - - # The roleinfo of 'metadata_name' should have been initialized with - # defaults when it was loaded from its parent role. - if repo_lib._metadata_is_partially_loaded(metadata_name, signable, roleinfo): - roleinfo['partial_loaded'] = True - - tuf.roledb.update_roleinfo(metadata_name, roleinfo, mark_role_as_dirty=False) + + tuf.roledb.add_role(metadata_name, roleinfo) loaded_metadata.append(metadata_name) # Generate the Targets objects of the delegated roles of 'metadata_name' @@ -2938,27 +2967,13 @@ def load_repository(repository_directory): # The repository maintainer should have also been made aware of the # duplicate key when it was added.
for key_metadata in six.itervalues(metadata_object['delegations']['keys']): - key_object = tuf.keys.format_metadata_to_key(key_metadata) - try: + key_object, junk = tuf.keys.format_metadata_to_key(key_metadata) + try: tuf.keydb.add_key(key_object) except tuf.KeyAlreadyExistsError: pass - - # Add the delegated role's initial roleinfo, to be fully populated - # when its metadata file is next loaded in one of the next iterations. - for role in metadata_object['delegations']['roles']: - rolename = role['name'] - roleinfo = {'name': role['name'], 'keyids': role['keyids'], - 'threshold': role['threshold'], - 'compressions': [''], 'signing_keyids': [], - 'signatures': [], - 'paths': {}, - 'partial_loaded': False, - 'delegations': {'keys': {}, - 'roles': []}} - tuf.roledb.add_role(rolename, roleinfo) - + return repository diff --git a/tuf/roledb.py b/tuf/roledb.py index d0bfb4a194..2fe36df0d5 100755 --- a/tuf/roledb.py +++ b/tuf/roledb.py @@ -118,12 +118,14 @@ def create_roledb_from_root_metadata(root_metadata, repository_name='default'): # Do not modify the contents of the 'root_metadata' argument. root_metadata = copy.deepcopy(root_metadata) - # Iterate through the roles found in 'root_metadata' - # and add them to '_roledb_dict'. Duplicates are avoided. + # Iterate the roles found in 'root_metadata' and add them to '_roledb_dict'. + # Duplicates are avoided. for rolename, roleinfo in six.iteritems(root_metadata['roles']): if rolename == 'root': roleinfo['version'] = root_metadata['version'] roleinfo['expires'] = root_metadata['expires'] + roleinfo['previous_keyids'] = roleinfo['keyids'] + roleinfo['previous_threshold'] = roleinfo['threshold'] roleinfo['signatures'] = [] roleinfo['signing_keyids'] = [] diff --git a/tuf/sig.py b/tuf/sig.py index 42760af050..ac2ad9ac54 100755 --- a/tuf/sig.py +++ b/tuf/sig.py @@ -43,22 +43,33 @@ from __future__ import division from __future__ import unicode_literals +import logging + import tuf import tuf.formats import tuf.keydb import tuf.roledb -def get_signature_status(signable, role=None, repository_name='default'): +# See 'log.py' to learn how logging is handled in TUF. +logger = logging.getLogger('tuf.sig') + +# Disable 'iso8601' logger messages to prevent 'iso8601' from clogging the +# log file. +iso8601_logger = logging.getLogger('iso8601') +iso8601_logger.disabled = True + + +def get_signature_status(signable, role=None, repository_name='default', + threshold=None, keyids=None): """ - Return a dictionary representing the status of the signatures listed - in 'signable'. Given an object conformant to SIGNABLE_SCHEMA, a set - of public keys in 'tuf.keydb', a set of roles in 'tuf.roledb', - and a role, the status of these signatures can be determined. This - method will iterate through the signatures in 'signable' and enumerate - all the keys that are valid, invalid, unrecognized, unauthorized, or - generated using an unknown method. + Return a dictionary representing the status of the signatures listed in + 'signable'. Given an object conformant to SIGNABLE_SCHEMA, a set of public + keys in 'tuf.keydb', a set of roles in 'tuf.roledb', and a role, the status + of these signatures can be determined. This method will iterate the + signatures in 'signable' and enumerate all the keys that are valid, + invalid, unrecognized, unauthorized, or generated using an unknown method. signable: @@ -71,19 +82,24 @@ def get_signature_status(signable, role=None, repository_name='default'): role: TUF role (e.g., 'root', 'targets', 'snapshot'). 
- - repository_name: - The name of the repository to check the signature status. The roledb - keeps a separate set of roles for each repository. If not supplied, the - signature status is verified for the 'role' in the 'default' repository. + + threshold: + Rather than reference the role's threshold as set in tuf.roledb.py, use + the given 'threshold' to calculate the signature status of 'signable'. + 'threshold' is an integer that sets the role's threshold, i.e., the + minimum number of signatures needed for metadata to be considered + fully signed. + + keyids: + Similar to the 'threshold' argument, use the supplied list of 'keyids' + to calculate the signature status, instead of referencing the keyids + in tuf.roledb.py for 'role'. tuf.FormatError, if 'signable' does not have the correct format. tuf.UnknownRoleError, if 'role' is not recognized. - tuf.InvalidNameError, if 'repository_name' does not exist in the role db. - None. @@ -101,6 +117,12 @@ def get_signature_status(signable, role=None, repository_name='default', if role is not None: tuf.formats.ROLENAME_SCHEMA.check_match(role) + + if threshold is not None: + tuf.formats.THRESHOLD_SCHEMA.check_match(threshold) + + if keyids is not None: + tuf.formats.KEYIDS_SCHEMA.check_match(keyids) # The signature status dictionary returned. signature_status = {} @@ -108,10 +130,11 @@ def get_signature_status(signable, role=None, repository_name='default', # The fields of the signature_status dict. A description of each field: # good_sigs = keys confirmed to have produced 'sig' and 'method' using # 'signed' and that are associated with 'role'; bad_sigs = negation of - # good_sigs; unknown_sigs = keys not found in the 'keydb' database; - # untrusted_sigs = keys that are not in the list of keyids associated - # with 'role'; unknown_method_sigs = keys found to have used an - # unsupported method of generating signatures. + # good_sigs; unknown_sigs = keys not found in the 'keydb' database; + # untrusted_sigs = keys that are not in the list of keyids associated with + # 'role'; + # unknown_method_sigs = keys found to have used an unsupported method + # of generating signatures. good_sigs = [] bad_sigs = [] unknown_sigs = [] @@ -149,27 +172,33 @@ def get_signature_status(signable, role=None, repository_name='default', # We are now dealing with a valid key. if valid_sig: if role is not None: + try: - # Identify unauthorized key. - if keyid not in tuf.roledb.get_role_keyids(role, repository_name): + # Identify unauthorized key. + if keyids is None: + keyids = tuf.roledb.get_role_keyids(role, repository_name) + + if keyid not in keyids: untrusted_sigs.append(keyid) continue # Unknown role, re-raise exception. except tuf.UnknownRoleError: raise + # Identify good/authorized key. good_sigs.append(keyid) else: # Identify bad key. - bad_sigs.append(keyid) - + bad_sigs.append(keyid) + # Retrieve the threshold value for 'role'. Raise tuf.UnknownRoleError # if we were given an invalid role. if role is not None: try: - threshold = tuf.roledb.get_role_threshold(role, repository_name) + # Consult the roledb only if the caller did not supply a 'threshold' + # override. + if threshold is None: + threshold = \ + tuf.roledb.get_role_threshold(role, repository_name=repository_name) except tuf.UnknownRoleError: raise @@ -178,7 +207,7 @@ def get_signature_status(signable, role=None, repository_name='default', threshold = 0 # Build the signature_status dict.
- signature_status['threshold'] = threshold + signature_status['threshold'] = threshold signature_status['good_sigs'] = good_sigs signature_status['bad_sigs'] = bad_sigs signature_status['unknown_sigs'] = unknown_sigs @@ -191,7 +220,8 @@ -def verify(signable, role, repository_name='default'): +def verify(signable, role, repository_name='default', + threshold=None, keyids=None): """ Verify whether the authorized signatures of 'signable' meet the minimum @@ -206,12 +236,18 @@ def verify(signable, role, repository_name='default', role: TUF role (e.g., 'root', 'targets', 'snapshot'). - - repository_name: - The name of the repository to verify 'signable'. The role and key db - modules keep track of separate sets of roles and keys for each - repository. If 'repository_name' is not supplied, the 'default' - repository is queried. + + threshold: + Rather than reference the role's threshold as set in tuf.roledb.py, use + the given 'threshold' to calculate the signature status of 'signable'. + 'threshold' is an integer that sets the role's threshold, i.e., the + minimum number of signatures needed for metadata to be considered + fully signed. + + keyids: + Similar to the 'threshold' argument, use the supplied list of 'keyids' + to calculate the signature status, instead of referencing the keyids + in tuf.roledb.py for 'role'. tuf.UnknownRoleError, if 'role' is not recognized. @@ -220,9 +256,6 @@ def verify(signable, role, repository_name='default', tuf.Error, if an invalid threshold is encountered. - tuf.InvalidNameError, if 'repository_name' does not exist in either the - role or key db. - tuf.sig.get_signature_status() called. Any exceptions thrown by get_signature_status() will be caught here and re-raised. @@ -231,17 +264,16 @@ def verify(signable, role, repository_name='default', Boolean. True if the number of good signatures >= the role's threshold, False otherwise. """ - - # Do the arguments have the correct format? If not, raise 'tuf.FormatError'. - tuf.formats.SIGNABLE_SCHEMA.check_match(signable) + + tuf.formats.SIGNABLE_SCHEMA.check_match(signable) tuf.formats.ROLENAME_SCHEMA.check_match(role) tuf.formats.NAME_SCHEMA.check_match(repository_name) # Retrieve the signature status. tuf.sig.get_signature_status() raises: - # tuf.UnknownRoleError - # tuf.FormatError - status = get_signature_status(signable, role, repository_name) - + # tuf.UnknownRoleError and tuf.FormatError. 'threshold' and 'keyids' are + # also validated. + status = get_signature_status(signable, role, repository_name, threshold, keyids) + # Retrieve the role's threshold and the authorized keys of 'status' threshold = status['threshold'] good_sigs = status['good_sigs'] @@ -251,7 +283,7 @@ def verify(signable, role, repository_name='default', # Note: get_signature_status() is expected to verify that 'threshold' is # not None or <= 0. if threshold is None or threshold <= 0: #pragma: no cover - raise tuf.Error("Invalid threshold: " + str(threshold)) + raise tuf.Error("Invalid threshold: " + repr(threshold)) return len(good_sigs) >= threshold
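The new 'threshold' and 'keyids' arguments to get_signature_status() and verify() are what allow the should_write() check in repository_lib, shown earlier in this patch, to validate a rotated root against the keys and threshold of the previous root. A minimal sketch of that cross-signing check follows; the wrapper function and its name are illustrative, and only the tuf.sig.verify() calls use the API added here.

  import tuf.sig

  def root_rotation_is_fully_signed(signable, previous_keyids,
                                    previous_threshold):
    # The new root must meet its own (current) threshold of signatures,
    # as recorded in tuf.roledb.
    current_ok = tuf.sig.verify(signable, 'root', 'default')

    # It must also meet a threshold of the keys trusted by the previous
    # root, so that outdated clients walking the root history (1.root.json,
    # 2.root.json, ...) can verify it.
    previous_ok = tuf.sig.verify(signable, 'root', 'default',
                                 previous_threshold, previous_keyids)

    return current_ok and previous_ok

Because get_signature_status() only consults tuf.roledb when 'threshold' or 'keyids' is None, calls that omit the new arguments behave exactly as before, keeping the change backward compatible.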