Mirror of https://github.com/tahoe-lafs/tahoe-lafs.git (synced 2025-02-22 02:16:42 +00:00)
fix more keyutil things and key-handling in test
This commit is contained in:
parent 975491b519
commit c029698435
@@ -113,8 +113,8 @@ def test_reject_storage_server(reactor, request, storage_nodes, temp_dir, introd
     gm_config = yield util.run_tahoe(
         reactor, request, "grid-manager", "--config", "-", "create",
     )
-    privkey_bytes = json.loads(gm_config)['private_key'].encode('ascii')
-    privkey, _ = ed25519.signing_keypair_from_string(privkey_bytes)
+    gm_privkey_bytes = json.loads(gm_config)['private_key'].encode('ascii')
+    gm_privkey, gm_pubkey = ed25519.signing_keypair_from_string(gm_privkey_bytes)
 
     # create certificates for first 2 storage-servers
     for idx, storage in enumerate(storage_nodes[:2]):
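The renamed variables now also capture the verifying key (gm_pubkey), which a later hunk writes into carol's [grid_managers] section. For reference, a minimal sketch of the allmydata.crypto.ed25519 round-trip the test relies on; it assumes a tahoe-lafs environment, and create_signing_keypair / string_from_signing_key are the companion helpers in the same module:

    from allmydata.crypto import ed25519

    # create_signing_keypair() returns a (signing_key, verifying_key) pair
    priv, pub = ed25519.create_signing_keypair()

    # keys serialize to ASCII strings; signing_keypair_from_string()
    # reverses that and re-derives the verifying key from the private
    # half, which is what the test does with the grid-manager's JSON
    priv2, pub2 = ed25519.signing_keypair_from_string(
        ed25519.string_from_signing_key(priv))
    assert (ed25519.string_from_verifying_key(pub)
            == ed25519.string_from_verifying_key(pub2))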
@@ -140,7 +140,6 @@ def test_reject_storage_server(reactor, request, storage_nodes, temp_dir, introd
     print("inserting certificates")
     # insert their certificates
     for idx, storage in enumerate(storage_nodes[:2]):
-        print(idx, storage)
         cert = yield util.run_tahoe(
             reactor, request, "grid-manager", "--config", "-", "sign",
             "storage{}".format(idx),
@@ -152,12 +151,12 @@ def test_reject_storage_server(reactor, request, storage_nodes, temp_dir, introd
         config.set("storage", "grid_management", "True")
         config.add_section("grid_manager_certificates")
         config.set("grid_manager_certificates", "default", "gridmanager.cert")
-        config.write(open(join(storage._node_dir, "tahoe.cfg"), "w"))
+        with open(join(storage._node_dir, "tahoe.cfg"), "w") as f:
+            config.write(f)
 
         # re-start this storage server
         storage.transport.signalProcess('TERM')
         yield storage.transport._protocol.exited
-        time.sleep(1)
         storage_nodes[idx] = yield util._run_node(
             reactor, storage._node_dir, request, None,
         )
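The one-liner config.write(open(path, "w")) never closes the file object it opens, so tahoe.cfg may still be partially buffered when the node is killed and immediately restarted; the with-block flushes and closes before signalProcess runs, and the time.sleep(1) is dropped, presumably because the awaited exited Deferred already guarantees the old process is gone. A generic sketch of the pattern (Python 3 spelling of the stdlib module; the test itself targeted Python 2):

    import configparser

    config = configparser.ConfigParser()
    config.add_section("storage")
    config.set("storage", "grid_management", "True")

    # the context manager guarantees tahoe.cfg is flushed and closed
    # before the node process is restarted and re-reads it
    with open("tahoe.cfg", "w") as f:
        config.write(f)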
@@ -167,14 +166,16 @@ def test_reject_storage_server(reactor, request, storage_nodes, temp_dir, introd
 
     config = configutil.get_config(join(carol._node_dir, "tahoe.cfg"))
     config.add_section("grid_managers")
-    config.set("grid_managers", "test", pubkey_str)
-    config.write(open(join(carol._node_dir, "tahoe.cfg"), "w"))
+    config.set("grid_managers", "test", ed25519.string_from_verifying_key(gm_pubkey))
+    with open(join(carol._node_dir, "tahoe.cfg"), "w") as f:
+        config.write(f)
     carol.transport.signalProcess('TERM')
     yield carol.transport._protocol.exited
 
     carol = yield util._run_node(
         reactor, carol._node_dir, request, None,
     )
+    yield util.await_client_ready(carol, servers=5)
 
     # try to put something into the grid, which should fail (because
     # carol has happy=3 but should only find storage0, storage1 to be
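With this change carol's tahoe.cfg gains a section along these lines (the key value is a placeholder, not a real key):

    [grid_managers]
    test = pub-v0-<base32-encoded verifying key>

The added servers=5 argument makes await_client_ready block until carol actually sees all five storage servers rather than just one; the util.py hunks below add that parameter.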
@@ -478,7 +478,7 @@ def web_post(tahoe, uri_fragment, **kwargs):
     return resp.content
 
 
-def await_client_ready(tahoe, timeout=10, liveness=60*2):
+def await_client_ready(tahoe, timeout=10, liveness=60*2, servers=1):
     """
     Uses the status API to wait for a client-type node (in `tahoe`, a
     `TahoeProcess` instance usually from a fixture e.g. `alice`) to be
@@ -502,8 +502,8 @@ def await_client_ready(tahoe, timeout=10, liveness=60*2):
            time.sleep(1)
            continue
 
-        if len(js['servers']) == 0:
-            print("waiting because no servers at all")
+        if len(js['servers']) < servers:
+            print("waiting because fewer than {} server(s)".format(servers))
            time.sleep(1)
            continue
        server_times = [
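A condensed sketch of what the generalized readiness check amounts to; get_status_json stands in for the helper's real status-API fetch and is not part of tahoe-lafs:

    import time

    def wait_for_servers(get_status_json, servers=1, timeout=10):
        # poll until the node reports at least `servers` connected servers
        start = time.time()
        while time.time() - start < timeout:
            js = get_status_json()
            if len(js['servers']) < servers:
                print("waiting because fewer than {} server(s)".format(servers))
                time.sleep(1)
                continue
            return True
        raise RuntimeError(
            "fewer than {} servers after {}s".format(servers, timeout))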
@@ -249,14 +249,14 @@ class _GridManager(object):
             "version": 1,
         }
         cert_data = json.dumps(cert_info, separators=(',',':'), sort_keys=True).encode('utf8')
-        sig = self._private_key.sign(cert_data)
+        sig = ed25519.sign_data(self._private_key, cert_data)
         certificate = {
             u"certificate": cert_data,
             u"signature": base32.b2a(sig),
         }
 
         vk = ed25519.verifying_key_from_signing_key(self._private_key)
-        assert vk.verify(sig, cert_data) is None, "cert should verify"
+        ed25519.verify_signature(vk, sig, cert_data)
 
         return certificate
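Both replacements route through the module-level helpers of allmydata.crypto.ed25519, consistent with the rest of the migration away from the keyutil-era key objects; verify_signature returns None on success and raises on mismatch, so the old assert-is-None wrapper added nothing. A round-trip sketch, assuming a tahoe-lafs environment:

    from allmydata.crypto import ed25519
    from allmydata.util import base32

    priv, pub = ed25519.create_signing_keypair()
    cert_data = b'{"expires": 0, "version": 1}'

    sig = ed25519.sign_data(priv, cert_data)       # raw signature bytes
    ed25519.verify_signature(pub, sig, cert_data)  # raises if invalid

    # the certificate stores the signature base32-encoded, as above
    encoded_sig = base32.b2a(sig)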
@@ -67,6 +67,7 @@ from allmydata.util.assertutil import precondition
 from allmydata.util.observer import ObserverList
 from allmydata.util.rrefutil import add_version_to_remote_reference
 from allmydata.util.hashutil import permute_server_hash
+from allmydata.crypto import ed25519
 
 # who is responsible for de-duplication?
 # both?
@@ -473,11 +474,12 @@ def validate_grid_manager_certificate(gm_key, alleged_cert, now_fn=None):
         now_fn = datetime.utcnow
 
     try:
-        gm_key.verify(
+        ed25519.verify_signature(
+            gm_key,
             base32.a2b(alleged_cert['signature'].encode('ascii')),
             alleged_cert['certificate'].encode('ascii'),
         )
-    except ed25519.BadSignatureError:
+    except ed25519.BadSignature:
         return False
     # signature is valid; now we can load the actual data
     cert = json.loads(alleged_cert['certificate'])
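From the caller's side, validation decodes the base32 signature and relies on verify_signature raising on a mismatch. A condensed sketch (gm_key would be a verifying key, e.g. from ed25519.verifying_key_from_string; the exception name follows the diff above):

    from allmydata.crypto import ed25519
    from allmydata.util import base32

    def signature_is_valid(gm_key, alleged_cert):
        # verify_signature raises on a bad signature, returns None otherwise
        try:
            ed25519.verify_signature(
                gm_key,
                base32.a2b(alleged_cert['signature'].encode('ascii')),
                alleged_cert['certificate'].encode('ascii'),
            )
        except ed25519.BadSignature:
            return False
        return True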