2013-03-15 02:28:38 +00:00
2017-01-19 22:39:53 +00:00
import os , re , sys , time , json
2013-03-15 02:28:38 +00:00
2006-12-03 01:30:18 +00:00
from twisted . trial import unittest
2008-04-23 00:14:26 +00:00
from twisted . internet import defer
2016-08-04 05:09:12 +00:00
from twisted . application import service
2010-11-10 03:36:49 +00:00
2008-03-12 00:36:25 +00:00
import allmydata
2016-08-04 05:09:12 +00:00
from allmydata import client , uri
from allmydata . introducer . server import IntroducerNode
2009-02-18 21:46:55 +00:00
from allmydata . storage . mutable import MutableShareFile
from allmydata . storage . server import si_a2b
2009-12-01 22:44:35 +00:00
from allmydata . immutable import offloaded , upload
2010-08-04 07:27:10 +00:00
from allmydata . immutable . literal import LiteralFileNode
from allmydata . immutable . filenode import ImmutableFileNode
2016-08-04 05:09:12 +00:00
from allmydata . util import idlib , mathutil , pollmixin , fileutil , iputil
2008-07-07 21:11:02 +00:00
from allmydata . util import log , base32
2015-04-21 18:26:16 +00:00
from allmydata . util . encodingutil import quote_output , unicode_to_argv
2010-07-22 00:14:18 +00:00
from allmydata . util . fileutil import abspath_expanduser_unicode
2009-12-01 22:44:35 +00:00
from allmydata . util . consumer import MemoryConsumer , download_to_data
2016-08-04 05:09:12 +00:00
from allmydata . stats import StatsGathererService
2009-11-11 22:45:42 +00:00
from allmydata . interfaces import IDirectoryNode , IFileNode , \
2009-06-25 02:18:09 +00:00
NoSuchChildError , NoSharesError
2009-02-18 22:42:34 +00:00
from allmydata . monitor import Monitor
2010-01-27 06:44:30 +00:00
from allmydata . mutable . common import NotWriteableError
2008-04-11 21:31:16 +00:00
from allmydata . mutable import layout as mutable_layout
2011-08-07 00:44:59 +00:00
from allmydata . mutable . publish import MutableData
2012-06-15 23:20:28 +00:00
2016-08-04 05:09:12 +00:00
from foolscap . api import DeadReferenceError , fireEventually , flushEventualQueue
2007-04-19 01:29:10 +00:00
from twisted . python . failure import Failure
2006-12-03 01:30:18 +00:00
2016-08-04 05:09:12 +00:00
from . common import TEST_RSA_KEY_SIZE
2017-02-21 22:31:37 +00:00
from . common_web import do_http , Error
2007-04-07 03:34:32 +00:00
2011-01-21 08:04:29 +00:00
# TODO: move this to common or common_util
2011-01-20 04:32:38 +00:00
from allmydata . test . test_runner import RunBinTahoeMixin
2016-08-04 05:09:12 +00:00
from . import common_util as testutil
2016-09-09 17:41:20 +00:00
from . common_util import run_cli
2010-11-10 03:36:49 +00:00
2007-07-12 20:22:36 +00:00
LARGE_DATA = """
2010-01-14 20:11:19 +00:00
This is some data to publish to the remote grid . . , which needs to be large
2007-07-12 20:22:36 +00:00
enough to not fit inside a LIT uri .
"""
2016-08-04 05:09:12 +00:00
# Our system tests use the same Tub certificates each time, to avoid the
# overhead of key generation.
SYSTEM_TEST_CERTS = [
""" -----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQyMDVaFw0wOTA3MjUyMjQyMDVaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAxHCWajrR
2 h / iurw8k93m8WUdE3xypJiiAITw7GkKlKbCLD + dEce2MXwVVYca0n / MZZsj89Cu
Ko0lLjksMseoSDoj98iEmVpaY5mc2ntpQ + FXdoEmPP234XRWEg2HQ + EaK6 + WkGQg
DDXQvFJCVCQk / n1MdAwZZ6vqf2ITzSuD44kCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQBn6qPKGdFjWJy7sOOTUFfm / THhHQqAh1pBDLkjR + OtzuobCoP8n8J1LNG3Yxds
Jj7NWQL7X5TfOlfoi7e9jK0ujGgWh3yYU6PnHzJLkDiDT3LCSywQuGXCjh0tOStS
2 gaCmmAK2cfxSStKzNcewl2Zs8wHMygq8TLFoZ6ozN1 + xQ ==
- - - - - END CERTIFICATE - - - - -
- - - - - BEGIN RSA PRIVATE KEY - - - - -
MIICXQIBAAKBgQDEcJZqOtHaH + K6vDyT3ebxZR0TfHKkmKIAhPDsaQqUpsIsP50R
x7YxfBVVhxrSf8xlmyPz0K4qjSUuOSwyx6hIOiP3yISZWlpjmZzae2lD4Vd2gSY8
/ bfhdFYSDYdD4Rorr5aQZCAMNdC8UkJUJCT + fUx0DBlnq + p / YhPNK4PjiQIDAQAB
AoGAZyDMdrymiyMOPwavrtlicvyohSBid3MCKc + hRBvpSB0790r2RO1aAySndp1V
QYmCXx1RhKDbrs8m49t0Dryu5T + sQrFl0E3usAP3vvXWeh4jwJ9GyiRWy4xOEuEQ
3 ewjbEItHqA / bRJF0TNtbOmZTDC7v9FRPf2bTAyFfTZep5kCQQD33q1RA8WUYtmQ
IArgHqt69i421lpXlOgqotFHwTx4FiGgVzDQCDuXU6txB9EeKRM340poissav / n6
bkLZ7 / VDAkEAyuIPkeI59sE5NnmW + N47NbCfdM1Smy1YxZpv942EmP9Veub5N0dw
iK5bLAgEguUIjpTsh3BRmsE9Xd + ItmnRQwJBAMZhbg19G1EbnE0BmDKv2UbcaThy
bnPSNc6J6T2opqDl9ZvCrMqTDD6dNIWOYAvni / 4 a556sFsoeBBAu10peBskCQE6S
cB86cuJagLLVMh / dySaI6ahNoFFSpY + ZuQUxfInYUR2Q + DFtbGqyw8JwtHaRBthZ
WqU1XZVGg2KooISsxIsCQQD1PS7 / / xHLumBb0jnpL7n6W8gmiTyzblT + 0 otaCisP
fN6rTlwV1o8VsOUAz0rmKO5RArCbkmb01WtMgPCDBYkk
- - - - - END RSA PRIVATE KEY - - - - -
""" , # 0
""" -----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQyMDVaFw0wOTA3MjUyMjQyMDVaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAs9CALdmW
kJ6r0KPSLdGCA8rzQKxWayrMckT22ZtbRv3aw6VA96dWclpY + T2maV0LrAzmMSL8
n61ydJHM33iYDOyWbwHWN45XCjY / e20PL54XUl / DmbBHEhQVQLIfCldcRcnWEfoO
iOhDJfWpDO1dmP / aOYLdkZCZvBtPAfyUqRcCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQAN9eaCREkzzk4yPIaWYkWHg3Igs1vnOR / iDw3OjyxO / xJFP2lkA2WtrwL2RTRq
dxA8gwdPyrWgdiZElwZH8mzTJ4OdUXLSMclLOg9kvH6gtSvhLztfEDwDP1wRhikh
OeWWu2GIC + uqFCI1ftoGgU + aIa6yrHswf66rrQvBSSvJPQ ==
- - - - - END CERTIFICATE - - - - -
- - - - - BEGIN RSA PRIVATE KEY - - - - -
MIICXQIBAAKBgQCz0IAt2ZaQnqvQo9It0YIDyvNArFZrKsxyRPbZm1tG / drDpUD3
p1ZyWlj5PaZpXQusDOYxIvyfrXJ0kczfeJgM7JZvAdY3jlcKNj97bQ8vnhdSX8OZ
sEcSFBVAsh8KV1xFydYR + g6I6EMl9akM7V2Y / 9 o5gt2RkJm8G08B / JSpFwIDAQAB
AoGBAIUy5zCPpSP + FeJY6CG + t6Pdm / IFd4KtUoM3KPCrT6M3 + uzApm6Ny9Crsor2
qyYTocjSSVaOxzn1fvpw4qWLrH1veUf8ozMs8Z0VuPHD1GYUGjOXaBPXb5o1fQL9
h7pS5 / HrDDPN6wwDNTsxRf / fP58CnfwQUhwdoxcx8TnVmDQxAkEA6N3jBXt / Lh0z
UbXHhv3QBOcqLZA2I4tY7wQzvUvKvVmCJoW1tfhBdYQWeQv0jzjL5PzrrNY8hC4l
8 + sFM3h5TwJBAMWtbFIEZfRSG1JhHK3evYHDTZnr / j + CdoWuhzP5RkjkIKsiLEH7
2 ZhA7CdFQLZF14oXy + g1uVCzzfB2WELtUbkCQQDKrb1XWzrBlzbAipfkXWs9qTmj
uJ32Z + V6 + 0 xRGPOXxJ0sDDqw7CeFMfchWg98zLFiV + SEZV78qPHtkAPR3ayvAkB +
hUMhM4N13t9x2IoclsXAOhp + + 9 bdG0l0woHyuAdOPATUw6iECwf4NQVxFRgYEZek
4 Ro3Y7taddrHn1dabr6xAkAic47OoLOROYLpljmJJO0eRe3Z5IFe + 0 D2LfhAW3LQ
JU + oGq5pCjfnoaDElRRZn0 + GmunnWeQEYKoflTi / lI9d
- - - - - END RSA PRIVATE KEY - - - - -
""" , # 1
""" -----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQyMDZaFw0wOTA3MjUyMjQyMDZaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAsxG7LTrz
DF + 9 wegOR / BRJhjSumPUbYQnNAUKtPraFsGjAJILP44AHdnHt1MONLgTeX1ynapo
q6O / q5cdKtBB7uEh7FpkLCCwpZt / m0y79cynn8AmWoQVgl8oS0567UmPeJnTzFPv
dmT5dlaQALeX5YGceAsEvhmAsdOMttaor38CAwEAATANBgkqhkiG9w0BAQQFAAOB
gQA345rxotfvh2kfgrmRzAyGewVBV4r23Go30GSZir8X2GoH3qKNwO4SekAohuSw
AiXzLUbwIdSRSqaLFxSC7Duqc9eIeFDAWjeEmpfFLBNiw3K8SLA00QrHCUXnECTD
b / Kk6OGuvPOiuuONVjEuEcRdCH3 / Li30D0AhJaMynjhQJQ ==
- - - - - END CERTIFICATE - - - - -
- - - - - BEGIN RSA PRIVATE KEY - - - - -
MIICXQIBAAKBgQCzEbstOvMMX73B6A5H8FEmGNK6Y9RthCc0BQq0 + toWwaMAkgs /
jgAd2ce3Uw40uBN5fXKdqmiro7 + rlx0q0EHu4SHsWmQsILClm3 + bTLv1zKefwCZa
hBWCXyhLTnrtSY94mdPMU + 92 ZPl2VpAAt5flgZx4CwS + GYCx04y21qivfwIDAQAB
AoGBAIlhFg / aRPL + VM9539LzHN60dp8GzceDdqwjHhbAySZiQlLCuJx2rcI4 / U65
CpIJku9G / fLV9N2RkA / trDPXeGyqCTJfnNzyZcvvMscRMFqSGyc21Y0a + GS8bIxt
1 R2B18epSVMsWSWWMypeEgsfv29LV7oSWG8UKaqQ9 + 0 h63DhAkEA4i2L / rori / Fb
wpIBfA + xbXL / GmWR7xPW + 3 nG3LdLQpVzxz4rIsmtO9hIXzvYpcufQbwgVACyMmRf
TMABeSDM7wJBAMquEdTaVXjGfH0EJ7z95Ys2rYTiCXjBfyEOi6RXXReqV9SXNKlN
aKsO22zYecpkAjY1EdUdXWP / mNVEybjpZnECQQCcuh0JPS5RwcTo9c2rjyBOjGIz
g3B1b5UIG2FurmCrWe6pgO3ZJFEzZ / L2cvz0Hj5UCa2JKBZTDvRutZoPumfnAkAb
nSW + y1Rz1Q8m9Ub4v9rjYbq4bRd / RVWtyk6KQIDldYbr5wH8wxgsniSVKtVFFuUa
P5bDY3HS6wMGo42cTOhxAkAcdweQSQ3j7mfc5vh71HeAC1v / VAKGehGOUdeEIQNl
Sb2WuzpZkbfsrVzW6MdlgY6eE7ufRswhDPLWPC8MP0d1
- - - - - END RSA PRIVATE KEY - - - - -
""" , # 2
""" -----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQyMDZaFw0wOTA3MjUyMjQyMDZaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAxnH + pbOS
qlJlsHpKUQtV0oN1Mv + ESG + yUDxStFFGjkJv / UIRzpxqFqY / 6 nJ3D03kZsDdcXyi
CfV9hPYQaVNMn6z + puPmIagfBQ0aOyuI + nUhCttZIYD9071BjW5bCMX5NZWL / CZm
E0HdAZ77H6UrRckJ7VR8wAFpihBxD5WliZcCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQAwXqY1Sjvp9JSTHKklu7s0T6YmH / BKSXrHpS2xO69svK + ze5 / + 5 td3jPn4Qe50
xwRNZSFmSLuJLfCO32QJSJTB7Vs5D3dNTZ2i8umsaodm97t8hit7L75nXRGHKH / /
xDVWAFB9sSgCQyPMRkL4wB4YSfRhoSKVwMvaz + XRZDUU0A ==
- - - - - END CERTIFICATE - - - - -
- - - - - BEGIN RSA PRIVATE KEY - - - - -
MIICXAIBAAKBgQDGcf6ls5KqUmWwekpRC1XSg3Uy / 4 RIb7JQPFK0UUaOQm / 9 QhHO
nGoWpj / qcncPTeRmwN1xfKIJ9X2E9hBpU0yfrP6m4 + YhqB8FDRo7K4j6dSEK21kh
gP3TvUGNblsIxfk1lYv8JmYTQd0BnvsfpStFyQntVHzAAWmKEHEPlaWJlwIDAQAB
AoGAdHNMlXwtItm7ZrY8ihZ2xFP0IHsk60TwhHkBp2LSXoTKJvnwbSgIcUYZ18BX
8 Zkp4MpoqEIU7HcssyuaMdR572huV2w0D / 2 gYJQLQ5JapaR3hMox3YG4wjXasN1U
1 iZt7JkhKlOy + ElL5T9mKTE1jDsX2RAv4WALzMpYFo7vs4ECQQDxqrPaqRQ5uYS /
ejmIk05nM3Q1zmoLtMDrfRqrjBhaf / W3hqGihiqN2kL3PIIYcxSRWiyNlYXjElsR
2 sllBTe3AkEA0jcMHVThwKt1 + Ce5VcE7N6hFfbsgISTjfJ + Q3K2NkvJkmtE8ZRX5
XprssnPN8owkfF5yuKbcSZL3uvaaSGN9IQJAfTVnN9wwOXQwHhDSbDt9 / KRBCnum
n + gHqDrKLaVJHOJ9SZf8eLswoww5c + UqtkYxmtlwie61Tp + 9 BXQosilQ4wJBAIZ1
XVNZmriBM4jR59L5MOZtxF0ilu98R + HLsn3kqLyIPF9mXCoQPxwLHkEan213xFKk
mt6PJDIPRlOZLqAEuuECQFQMCrn0VUwPg8E40pxMwgMETvVflPs / oZK1Iu + b7 + WY
vBptAyhMu31fHQFnJpiUOyHqSZnOZyEn1Qu2lszNvUg =
- - - - - END RSA PRIVATE KEY - - - - -
""" , # 3
""" -----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQyMDZaFw0wOTA3MjUyMjQyMDZaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAnjiOwipn
jigDuNMfNG / tBJhPwYUHhSbQdvrTubhsxw1oOq5XpNqUwRtC8hktOKM3hghyqExP
62 EOi0aJBkRhtwtPSLBCINptArZLfkog / nTIqVv4eLEzJ19nTi / llHHWKcgA6XTI
sU / snUhGlySA3RpETvXqIJTauQRZz0kToSUCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQCQ + u / CsX5WC5m0cLrpyIS6qZa62lrB3mj9H1aIQhisT5kRsMz3FJ1aOaS8zPRz
w0jhyRmamCcSsWf5WK539iOtsXbKMdAyjNtkQO3g + fnsLgmznAjjst24jfr + XU59
0 amiy1U6TY93gtEBZHtiLldPdUMsTuFbBlqbcMBQ50x9rA ==
- - - - - END CERTIFICATE - - - - -
- - - - - BEGIN RSA PRIVATE KEY - - - - -
MIICXAIBAAKBgQCeOI7CKmeOKAO40x80b + 0 EmE / BhQeFJtB2 + tO5uGzHDWg6rlek
2 pTBG0LyGS04ozeGCHKoTE / rYQ6LRokGRGG3C09IsEIg2m0Ctkt + SiD + dMipW / h4
sTMnX2dOL + WUcdYpyADpdMixT + ydSEaXJIDdGkRO9eoglNq5BFnPSROhJQIDAQAB
AoGAAPrst3s3xQOucjismtCOsVaYN + SxFTwWUoZfRWlFEz6cBLELzfOktEWM9p79
TrqEH4px22UNobGqO2amdql5yXwEFVhYQkRB8uDA8uVaqpL8NLWTGPRXxZ2DSU + n
7 / FLf / TWT3ti / ZtXaPVRj6E2 / Mq9AVEVOjUYzkNjM02OxcECQQDKEqmPbdZq2URU
7 RbUxkq5aTp8nzAgbpUsgBGQ9PDAymhj60BDEP0q28Ssa7tU70pRnQ3AZs9txgmL
kK2g97FNAkEAyHH9cIb6qXOAJPIr / xamFGr5uuYw9TJPz / hfVkVimW / aZnBB + e6Q
oALJBDKJWeYPzdNbouJYg8MeU0qWdZ5DOQJADUk + 1 sxc / bd9U6wnBSRog1pU2x7I
VkmPC1b8ULCaJ8LnLDKqjf5O9wNuIfwPXB1DoKwX3F + mIcyUkhWYJO5EPQJAUj5D
KMqZSrGzYHVlC / M1Daee88rDR7fu + 3 wDUhiCDkbQq7tftrbl7GF4LRq3NIWq8l7I
eJq6isWiSbaO6Y + YMQJBAJFBpVhlY5Px2BX5 + Hsfq6dSP3sVVc0eHkdsoZFFxq37
fksL / q2vlPczvBihgcxt + UzW / UrNkelOuX3i57PDvFs =
- - - - - END RSA PRIVATE KEY - - - - -
""" , # 4
""" -----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQyMDZaFw0wOTA3MjUyMjQyMDZaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAsCQuudDF
zgmY5tDpT0TkUo8fpJ5JcvgCkLFpSDD8REpXhLFkHWhTmTj3CAxfv4lA3sQzHZxe
4 S9YCb5c / VTbFEdgwc / wlxMmJiz2jYghdmWPBb8pBEk31YihIhC + u4kex6gJBH5y
ixiZ3PPRRMaOBBo + ZfM50XIyWbFOOM / 7 FwcCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQB4cFURaiiUx6n8eS4j4Vxrii5PtsaNEI4acANFSYknGd0xTP4vnmoivNmo5fWE
Q4hYtGezNu4a9MnNhcQmI20KzXmvhLJtkwWCgGOVJtMem8hDWXSALV1Ih8hmVkGS
CI1elfr9eyguunGp9eMMQfKhWH52WHFA0NYa0Kpv5BY33A ==
- - - - - END CERTIFICATE - - - - -
- - - - - BEGIN RSA PRIVATE KEY - - - - -
MIICWwIBAAKBgQCwJC650MXOCZjm0OlPRORSjx + knkly + AKQsWlIMPxESleEsWQd
aFOZOPcIDF + / iUDexDMdnF7hL1gJvlz9VNsUR2DBz / CXEyYmLPaNiCF2ZY8FvykE
STfViKEiEL67iR7HqAkEfnKLGJnc89FExo4EGj5l8znRcjJZsU44z / sXBwIDAQAB
AoGABA7xXKqoxBSIh1js5zypHhXaHsre2l1Igdj0mgs25MPpvE7yBZNvyan8Vx0h
36 Hj8r4Gh3og3YNfvem67sNTwNwONY0ep + Xho / 3 vG0jFATGduSXdcT04DusgZNqg
UJqW75cqxrD6o / nya5wUoN9NL5pcd5AgVMdOYvJGbrwQuaECQQDiCs / 5 dsUkUkeC
Tlur1wh0wJpW4Y2ctO3ncRdnAoAA9y8dELHXMqwKE4HtlyzHY7Bxds / BDh373EVK
rsdl + v9JAkEAx3xTmsOQvWa1tf / O30sdItVpGogKDvYqkLCNthUzPaL85BWB03E2
xunHcVVlqAOE5tFuw0 / UEyEkOaGlNTJTzwJAPIVel9FoCUiKYuYt / z1swy3KZRaw
/ tMmm4AZHvh5Y0jLcYHFy / OCQpRkhkOitqQHWunPyEXKW2PnnY5cTv68GQJAHG7H
B88KCUTjb25nkQIGxBlA4swzCtDhXkAb4rEA3a8mdmfuWjHPyeg2ShwO4jSmM7P0
Iph1NMjLff9hKcTjlwJARpItOFkYEdtSODC7FMm7KRKQnNB27gFAizsOYWD4D2b7
w1FTEZ / kSA9wSNhyNGt7dgUo6zFhm2u973HBCUb3dg ==
- - - - - END RSA PRIVATE KEY - - - - -
""" , # 5
""" -----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAvhTRj1dA
NOfse / UBeTfMekZKxZHsNPr + qBYaveWAHDded / BMyMgaMV2n6HQdiDaRjJkzjHCF
3 xBtpIJeEGUqfrF0ob8BIZXy3qk68eX / 0 CVUbgmjSBN44ahlo63NshyXmZtEAkRV
VE / + cRKw3N2wtuTed5xwfNcL6dg4KTOEYEkCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQCN + CLuVwLeWjSdVbdizYyrOVckqtwiIHG9BbGMlcIdm0qpvD7V7 / sN2csk5LaT
BNiHi1t5628 / 4 UHqqodYmFw8ri8ItFwB + MmTJi11CX6dIP9OUhS0qO8Z / BKtot7H
j04oNwl + WqZZfHIYwTIEL0HBn60nOvCQPDtnWG2BhpUxMA ==
- - - - - END CERTIFICATE - - - - -
- - - - - BEGIN RSA PRIVATE KEY - - - - -
MIICXQIBAAKBgQC + FNGPV0A05 + x79QF5N8x6RkrFkew0 + v6oFhq95YAcN1538EzI
yBoxXafodB2INpGMmTOMcIXfEG2kgl4QZSp + sXShvwEhlfLeqTrx5f / QJVRuCaNI
E3jhqGWjrc2yHJeZm0QCRFVUT / 5 xErDc3bC25N53nHB81wvp2DgpM4RgSQIDAQAB
AoGALl2BqIdN4Bnac3oV + + 2 CcSkIQB0SEvJOf820hDGhCEDxSCxTbn5w9S21MVxx
f7Jf2n3cNxuTbA / jzscGDtW + gXCs + WAbAr5aOqHLUPGEobhKQrQT2hrxQHyv3UFp
0 tIl9eXFknOyVAaUJ3athK5tyjSiCZQQHLGzeLaDSKVAPqECQQD1GK7DkTcLaSvw
hoTJ3dBK3JoKT2HHLitfEE0QV58mkqFMjofpe + nyeKWvEb / oB4WBp / cfTvtf7DJK
zl1OSf11AkEAxomWmJeub0xpqksCmnVI1Jt1mvmcE4xpIcXq8sxzLHRc2QOv0kTw
IcFl4QcN6EQBmE + 8 kl7Tx8SPAVKfJMoZBQJAGsUFYYrczjxAdlba7glyFJsfn / yn
m0 + poQpwwFYxpc7iGzB + G7xTAw62WfbAVSFtLYog7aR8xC9SFuWPP1vJeQJBAILo
xBj3ovgWTXIRJbVM8mnl28UFI0msgsHXK9VOw / 6 i93nMuYkPFbtcN14KdbwZ42dX
5 EIrLr + BNr4riW4LqDUCQQCbsEEpTmj3upKUOONPt + 6 CH / OOMjazUzYHZ / 3 ORHGp
Q3Wt + I4IrR / OsiACSIQAhS4kBfk / LGggnj56DrWt + oBl
- - - - - END RSA PRIVATE KEY - - - - -
""" , #6
""" -----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAtKhx6sEA
jn6HWc6T2klwlPn0quyHtATIw8V3ezP46v6g2rRS7dTywo4GTP4vX58l + sC9z9Je
qhQ1rWSwMK4FmnDMZCu7AVO7oMIXpXdSz7l0bgCnNjvbpkA2pOfbB1Z8oj8iebff
J33ID5DdkmCzqYVtKpII1o / 5 z7Jo292JYy8CAwEAATANBgkqhkiG9w0BAQQFAAOB
gQA0PYMA07wo9kEH4fv9TCfo + zz42Px6lUxrQBPxBvDiGYhk2kME / wX0IcoZPKTV
WyBGmDAYWvFaHWbrbbTOfzlLWfYrDD913hCi9cO8iF8oBqRjIlkKcxAoe7vVg5Az
ydVcrY + zqULJovWwyNmH1QNIQfMat0rj7fylwjiS1y / YsA ==
- - - - - END CERTIFICATE - - - - -
- - - - - BEGIN RSA PRIVATE KEY - - - - -
MIICXAIBAAKBgQC0qHHqwQCOfodZzpPaSXCU + fSq7Ie0BMjDxXd7M / jq / qDatFLt
1 PLCjgZM / i9fnyX6wL3P0l6qFDWtZLAwrgWacMxkK7sBU7ugwheld1LPuXRuAKc2
O9umQDak59sHVnyiPyJ5t98nfcgPkN2SYLOphW0qkgjWj / nPsmjb3YljLwIDAQAB
AoGAU4CYRv22mCZ7wVLunDLdyr5ODMMPZnHfqj2XoGbBYz0WdIBs5GlNXAfxeZzz
oKsbDvAPzANcphh5RxAHMDj / dT8rZOez + eJrs1GEV + crl1T9p83iUkAuOJFtgUgf
TtQBL9vHaj7DfvCEXcBPmN / teDFmAAOyUNbtuhTkRa3PbuECQQDwaqZ45Kr0natH
V312dqlf9ms8I6e873pAu + RvA3BAWczk65eGcRjEBxVpTvNEcYKFrV8O5ZYtolrr
VJl97AfdAkEAwF4w4KJ32fLPVoPnrYlgLw86NejMpAkixblm8cn51avPQmwbtahb
BZUuca22IpgDpjeEk5SpEMixKe / UjzxMewJBALy4q2cY8U3F + u6sshLtAPYQZIs3
3 fNE9W2dUKsIQvRwyZMlkLN7UhqHCPq6e + HNTM0MlCMIfAPkf4Rdy4N6ZY0CQCKE
BAMaQ6TwgzFDw5sIjiCDe + 9 WUPmRxhJyHL1 / fvtOs4Z4fVRP290ZklbFU2vLmMQH
LBuKzfb7 + 4 XJyXrV1 + cCQBqfPFQQZLr5UgccABYQ2jnWVbJPISJ5h2b0cwXt + pz /
8 ODEYLjqWr9K8dtbgwdpzwbkaGhQYpyvsguMvNPMohs =
- - - - - END RSA PRIVATE KEY - - - - -
""" , #7
""" -----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAnBfNHycn
5 RnYzDN4EWTk2q1BBxA6ZYtlG1WPkj5iKeaYKzUk58zBL7mNOA0ucq + yTwh9C4IC
EutWPaKBSKY5XI + Rdebh + Efq + urtOLgfJHlfcCraEx7hYN + tqqMVgEgnO / MqIsn1
I1Fvnp89mSYbQ9tmvhSH4Hm + nbeK6iL2tIsCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQBt9zxfsKWoyyV764rRb6XThuTDMNSDaVofqePEWjudAbDu6tp0pHcrL0XpIrnT
3 iPgD47pdlwQNbGJ7xXwZu2QTOq + Lv62E6PCL8FljDVoYqR3WwJFFUigNvBT2Zzu
Pxx7KUfOlm / M4XUSMu31sNJ0kQniBwpkW43YmHVNFb / R7g ==
- - - - - END CERTIFICATE - - - - -
- - - - - BEGIN RSA PRIVATE KEY - - - - -
MIICXQIBAAKBgQCcF80fJyflGdjMM3gRZOTarUEHEDpli2UbVY + SPmIp5pgrNSTn
zMEvuY04DS5yr7JPCH0LggIS61Y9ooFIpjlcj5F15uH4R + r66u04uB8keV9wKtoT
HuFg362qoxWASCc78yoiyfUjUW + enz2ZJhtD22a + FIfgeb6dt4rqIva0iwIDAQAB
AoGBAIHstcnWd7iUeQYPWUNxLaRvTY8pjNH04yWLZEOgNWkXDVX5mExw + + RTmB4t
qpm / cLWkJSEtB7jjthb7ao0j / t2ljqfr6kAbClDv3zByAEDhOu8xB / 5 ne6Ioo + k2
dygC + GcVcobhv8qRU + z0fpeXSP8yS1bQQHOaa17bSGsncvHRAkEAzwsn8jBTOqaW
6 Iymvr7Aql + + LiwEBrqMMRVyBZlkux4hiKa2P7XXEL6 / mOPR0aI2LuCqE2COrO7R
0 wAFZ54bjwJBAMEAe6cs0zI3p3STHwA3LoSZB81lzLhGUnYBvOq1yoDSlJCOYpld
YM1y3eC0vwiOnEu3GG1bhkW + h6Kx0I / qyUUCQBiH9NqwORxI4rZ4 + 8 S76y4EnA7y
biOx9KxYIyNgslutTUHYpt1TmUDFqQPfclvJQWw6eExFc4Iv5bJ / XSSSyicCQGyY
5 PrwEfYTsrm5fpwUcKxTnzxHp6WYjBWybKZ0m / lYhBfCxmAdVrbDh21Exqj99Zv0
7 l26PhdIWfGFtCEGrzECQQCtPyXa3ostSceR7zEKxyn9QBCNXKARfNNTBja6 + VRE
qDC6jLqzu / SoOYaqa13QzCsttO2iZk8Ygfy3Yz0n37GE
- - - - - END RSA PRIVATE KEY - - - - -
""" , #8
""" -----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA4mnLf + x0
CWKDKP5PLZ87t2ReSDE / J5QoI5VhE0bXaahdhPrQTC2wvOpT + N9nzEpI9ASh / ejV
kYGlc03nNKRL7zyVM1UyGduEwsRssFMqfyJhI1p + VmxDMWNplex7mIAheAdskPj3
pwi2CP4VIMjOj368AXvXItPzeCfAhYhEVaMCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQAEzmwq5JFI5Z0dX20m9rq7NKgwRyAH3h5aE8bdjO8nEc69qscfDRx79Lws3kK8
A0LG0DhxKB8cTNu3u + jy81tjcC4pLNQ5IKap9ksmP7RtIHfTA55G8M3fPl2ZgDYQ
ZzsWAZvTNXd / eme0SgOzD10rfntA6ZIgJTWHx3E0RkdwKw ==
- - - - - END CERTIFICATE - - - - -
- - - - - BEGIN RSA PRIVATE KEY - - - - -
MIICXQIBAAKBgQDiact / 7 HQJYoMo / k8tnzu3ZF5IMT8nlCgjlWETRtdpqF2E + tBM
LbC86lP432fMSkj0BKH96NWRgaVzTec0pEvvPJUzVTIZ24TCxGywUyp / ImEjWn5W
bEMxY2mV7HuYgCF4B2yQ + PenCLYI / hUgyM6PfrwBe9ci0 / N4J8CFiERVowIDAQAB
AoGAQYTl + 8 XcKl8Un4dAOG6M5FwqIHAH25c3Klzu85obehrbvUCriG / sZi7VT / 6 u
VeLlS6APlJ + NNgczbrOLhaNJyYzjICSt8BI96PldFUzCEkVlgE + 29 pO7RNoZmDYB
dSGyIDrWdVYfdzpir6kC0KDcrpA16Sc + / bK6Q8ALLRpC7QECQQD7F7fhIQ03CKSk
lS4mgDuBQrB / 52 jXgBumtjp71ANNeaWR6 + 06 KDPTLysM + olsh97Q7YOGORbrBnBg
Y2HPnOgjAkEA5taZaMfdFa8V1SPcX7mgCLykYIujqss0AmauZN / 24 oLdNE8HtTBF
OLaxE6PnQ0JWfx9KGIy3E0V3aFk5FWb0gQJBAO4KFEaXgOG1jfCBhNj3JHJseMso
5 Nm4F366r0MJQYBHXNGzqphB2K / Svat2MKX1QSUspk2u / a0d05dtYCLki6UCQHWS
sChyQ + UbfF9HGKOZBC3vBzo1ZXNEdIUUj5bJjBHq3YgbCK38nAU66A482TmkvDGb
Wj4OzeB + 7 Ua0yyJfggECQQDVlAa8HqdAcrbEwI / YfPydFsavBJ0KtcIGK2owQ + dk
dhlDnpXDud / AtX4Ft2LaquQ15fteRrYjjwI9SFGytjtp
- - - - - END RSA PRIVATE KEY - - - - -
""" , #9
""" -----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAueLfowPT
kXXtHeU2FZSz2mJhHmjqeyI1oMoyyggonccx65vMxaRfljnz2dOjVVYpCOn / LrdP
wVxHO8KNDsmQeWPRjnnBa2dFqqOnp / 8 gEJFJBW7K / gI9se6o + xe9QIWBq6d / fKVR
BURJe5TycLogzZuxQn1xHHILa3XleYuHAbMCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQBEC1lfC3XK0galQC96B7faLpnQmhn5lX2FUUoFIQQtBTetoE + gTqnLSOIZcOK4
pkT3YvxUvgOV0LOLClryo2IknMMGWRSAcXtVUBBLRHVTSSuVUyyLr5kdRU7B4E + l
OU0j8Md / dzlkm / / K1bzLyUaPq204ofH8su2IEX4b3IGmAQ ==
- - - - - END CERTIFICATE - - - - -
- - - - - BEGIN RSA PRIVATE KEY - - - - -
MIICWwIBAAKBgQC54t + jA9ORde0d5TYVlLPaYmEeaOp7IjWgyjLKCCidxzHrm8zF
pF + WOfPZ06NVVikI6f8ut0 / BXEc7wo0OyZB5Y9GOecFrZ0Wqo6en / yAQkUkFbsr +
Aj2x7qj7F71AhYGrp398pVEFREl7lPJwuiDNm7FCfXEccgtrdeV5i4cBswIDAQAB
AoGAO4PnJHNaLs16AMNdgKVevEIZZDolMQ1v7C4w + ryH / JRFaHE2q + UH8bpWV9zK
A82VT9RTrqpkb71S1VBiB2UDyz263XdAI / N2HcIVMmfKb72oV4gCI1KOv4DfFwZv
tVVcIdVEDBOZ2TgqK4opGOgWMDqgIAl2z3PbsIoNylZHEJECQQDtQeJFhEJGH4Qz
BGpdND0j2nnnJyhOFHJqikJNdul3uBwmxTK8FPEUUH / rtpyUan3VMOyDx3kX4OQg
GDNSb32rAkEAyJIZIJ0EMRHVedyWsfqR0zTGKRQ + qsc3sCfyUhFksWms9jsSS0DT
tVeTdC3F6EIAdpKOGhSyfBTU4jxwbFc0GQJADI4L9znEeAl66Wg2aLA2 / Aq3oK / F
xjv2wgSG9apxOFCZzMNqp + FD0Jth6YtEReZMuldYbLDFi6nu6HPfY2Fa + QJAdpm1
lAxk6yMxiZK / 5 VRWoH6HYske2Vtd + aNVbePtF992ME / z3F3kEkpL3hom + dT1cyfs
MU3l0Ot8ip7Ul6vlGQJAegNzpcfl2GFSdWQMxQ + nN3woKnPqpR1M3jgnqvo7L4Xe
JW3vRxvfdrUuzdlvZ / Pbsu / vOd + cuIa4h0yD5q3N + g ==
- - - - - END RSA PRIVATE KEY - - - - -
""" , #10
""" -----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAruBhwk + J
XdlwfKXXN8K + 43 JyEYCV7Fp7ZiES4t4AEJuQuBqJVMxpzeZzu2t / vVb59ThaxxtY
NGD3Xy6Og5dTv / / ztWng8P7HwwvfbrUICU6zo6JAhg7kfaNa116krCYOkC / cdJWt
o5W + zsDmI1jUVGH0D73h29atc1gn6wLpAsMCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQAEJ / ITGJ9lK / rk0yHcenW8SHsaSTlZMuJ4yEiIgrJ2t71Rd6mtCC / ljx9USvvK
bF500whTiZlnWgKi02boBEKa44z / DytF6pljeNPefBQSqZyUByGEb / 8 Mn58Idyls
q4 / d9iKXMPvbpQdcesOzgOffFZevLQSWyPRaIdYBOOiYUA ==
- - - - - END CERTIFICATE - - - - -
- - - - - BEGIN RSA PRIVATE KEY - - - - -
MIICXQIBAAKBgQCu4GHCT4ld2XB8pdc3wr7jcnIRgJXsWntmIRLi3gAQm5C4GolU
zGnN5nO7a3 + 9 Vvn1OFrHG1g0YPdfLo6Dl1O / / / O1aeDw / sfDC99utQgJTrOjokCG
DuR9o1rXXqSsJg6QL9x0la2jlb7OwOYjWNRUYfQPveHb1q1zWCfrAukCwwIDAQAB
AoGAcZAXC / dYrlBpIxkTRQu7qLqGZuVI9t7fabgqqpceFargdR4Odrn0L5jrKRer
MYrM8bjyAoC4a / NYUUBLnhrkcCQWO9q5fSQuFKFVWHY53SM63Qdqk8Y9Fmy / h / 4 c
UtwZ5BWkUWItvnTMgb9bFcvSiIhEcNQauypnMpgNknopu7kCQQDlSQT10LkX2IGT
bTUhPcManx92gucaKsPONKq2mP + 1 sIciThevRTZWZsxyIuoBBY43NcKKi8NlZCtj
hhSbtzYdAkEAw0B93CXfso8g2QIMj / HJJz / wNTLtg + rriXp6jh5HWe6lKWRVrce +
1 w8Qz6OI / ZP6xuQ9HNeZxJ / W6rZPW6BGXwJAHcTuRPA1p / fvUvHh7Q / 0 zfcNAbkb
QlV9GL / TzmNtB + 0 EjpqvDo2g8XTlZIhN85YCEf8D5DMjSn3H + GMHN / SArQJBAJlW
MIGPjNoh5V4Hae4xqBOW9wIQeM880rUo5s5toQNTk4mqLk9Hquwh / MXUXGUora08
2 XGpMC1midXSTwhaGmkCQQCdivptFEYl33PrVbxY9nzHynpp4Mi89vQF0cjCmaYY
N8L + bvLd4BU9g6hRS8b59lQ6GNjryx2bUnCVtLcey4Jd
- - - - - END RSA PRIVATE KEY - - - - -
""" , #11
]
# To disable the pre-computed tub certs, uncomment this line.
#SYSTEM_TEST_CERTS = []
def flush_but_dont_ignore(res):
    """Flush the foolscap eventual-send queue, then pass *res* through.

    Suitable as an addBoth handler: unlike calling flushEventualQueue()
    directly, the incoming result (or Failure) is preserved instead of
    being replaced by the flush's own result.
    """
    return flushEventualQueue().addCallback(lambda _ignored: res)
class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
    """Build a small live grid (one introducer plus several client nodes,
    optionally a stats gatherer) under a private MultiService, for use by
    end-to-end system tests."""

    # SystemTestMixin tests tend to be a lot of work, and we have a few
    # buildslaves that are pretty slow, and every once in a while these tests
    # run up against the default 120 second timeout. So increase the default
    # timeout. Individual test cases can override this, of course.
    timeout = 300

    def setUp(self):
        # Parent service that owns every node we start; tearDown() stops it
        # (and therefore all of its children) in a single step.
        self.sparent = service.MultiService()
        self.sparent.startService()
        self.stats_gatherer = None
        self.stats_gatherer_furl = None

    def tearDown(self):
        log.msg("shutting down SystemTest services")
        d = self.sparent.stopService()
        # Flush the foolscap eventual-send queue, but keep the original
        # result/Failure so shutdown errors are still reported.
        d.addBoth(flush_but_dont_ignore)
        return d

    def getdir(self, subdir):
        # Path of a per-node directory beneath this test's basedir.
        return os.path.join(self.basedir, subdir)

    def add_service(self, s):
        # Attach a service to our parent (starting it immediately, and
        # stopping it automatically in tearDown); returns it for chaining.
        s.setServiceParent(self.sparent)
        return s

    def set_up_nodes(self, NUMCLIENTS=5, use_stats_gatherer=False):
        """Create an introducer plus NUMCLIENTS client nodes.

        Returns a Deferred that fires once every client is connected to the
        introducer and to all storage servers.
        """
        self.numclients = NUMCLIENTS
        iv_dir = self.getdir("introducer")
        if not os.path.isdir(iv_dir):
            fileutil.make_dirs(iv_dir)
            fileutil.write(os.path.join(iv_dir, 'tahoe.cfg'),
                           "[node]\n" +
                           u"nickname = introducer \u263A\n".encode('utf-8') +
                           "web.port = tcp:0:interface=127.0.0.1\n")
            if SYSTEM_TEST_CERTS:
                # Use a pre-generated Tub certificate to skip key generation.
                os.mkdir(os.path.join(iv_dir, "private"))
                f = open(os.path.join(iv_dir, "private", "node.pem"), "w")
                f.write(SYSTEM_TEST_CERTS[0])
                f.close()
        iv = IntroducerNode(basedir=iv_dir)
        self.introducer = self.add_service(iv)
        self._get_introducer_web()
        d = defer.succeed(None)
        if use_stats_gatherer:
            d.addCallback(self._set_up_stats_gatherer)
        d.addCallback(self._set_up_nodes_2)
        if use_stats_gatherer:
            d.addCallback(self._grab_stats)
        return d

    def _get_introducer_web(self):
        # Record the introducer's web URL (written to node.url at startup).
        f = open(os.path.join(self.getdir("introducer"), "node.url"), "r")
        self.introweb_url = f.read().strip()
        f.close()

    def _set_up_stats_gatherer(self, res):
        # Start a StatsGathererService on a freshly allocated localhost port,
        # then poll until it has written out its FURL.
        statsdir = self.getdir("stats_gatherer")
        fileutil.make_dirs(statsdir)
        portnum = iputil.allocate_tcp_port()
        location = "tcp:127.0.0.1:%d" % portnum
        fileutil.write(os.path.join(statsdir, "location"), location)
        port = "tcp:%d:interface=127.0.0.1" % portnum
        fileutil.write(os.path.join(statsdir, "port"), port)
        self.stats_gatherer_svc = StatsGathererService(statsdir)
        self.stats_gatherer = self.stats_gatherer_svc.stats_gatherer
        self.add_service(self.stats_gatherer_svc)

        d = fireEventually()
        sgf = os.path.join(statsdir, 'stats_gatherer.furl')
        def check_for_furl():
            return os.path.exists(sgf)
        d.addCallback(lambda junk: self.poll(check_for_furl, timeout=30))
        def get_furl(junk):
            # NOTE(review): 'file' is the Python 2 builtin (this module
            # predates the py3 port); equivalent to open(sgf, 'rb').
            self.stats_gatherer_furl = file(sgf, 'rb').read().strip()
        d.addCallback(get_furl)
        return d

    def _set_up_nodes_2(self, res):
        """Write per-client config files and launch all clients.

        Client roles: [0] webserver + helper; [1] configured via
        private/introducers.yaml instead of tahoe.cfg; [3] webserver that
        uses client[0]'s helper; the rest are plain storage/client nodes.
        """
        q = self.introducer
        self.introducer_furl = q.introducer_url
        self.clients = []
        basedirs = []
        for i in range(self.numclients):
            basedir = self.getdir("client%d" % i)
            basedirs.append(basedir)
            fileutil.make_dirs(os.path.join(basedir, "private"))
            if len(SYSTEM_TEST_CERTS) > (i+1):
                # Pre-generated Tub cert (cert 0 went to the introducer).
                f = open(os.path.join(basedir, "private", "node.pem"), "w")
                f.write(SYSTEM_TEST_CERTS[i+1])
                f.close()
            config = "[client]\n"
            if i != 1:
                # clients[1] uses private/introducers.yaml, not tahoe.cfg
                config += "introducer.furl = %s\n" % self.introducer_furl
            if self.stats_gatherer_furl:
                config += "stats_gatherer.furl = %s\n" % self.stats_gatherer_furl

            nodeconfig = "[node]\n"
            nodeconfig += (u"nickname = client %d \u263A\n" % (i,)).encode('utf-8')
            tub_port = iputil.allocate_tcp_port()
            # Don't let it use AUTO: there's no need for tests to use
            # anything other than 127.0.0.1
            nodeconfig += "tub.port = tcp:%d\n" % tub_port
            nodeconfig += "tub.location = tcp:127.0.0.1:%d\n" % tub_port

            if i == 0:
                # clients[0] runs a webserver and a helper
                config += nodeconfig
                config += "web.port = tcp:0:interface=127.0.0.1\n"
                config += "timeout.keepalive = 600\n"
                config += "[helper]\n"
                config += "enabled = True\n"
            elif i == 1:
                # clients[1] uses private/introducers.yaml, not tahoe.cfg
                iyaml = ("introducers:\n"
                         " petname2:\n"
                         "  furl: %s\n") % self.introducer_furl
                iyaml_fn = os.path.join(basedir, "private", "introducers.yaml")
                fileutil.write(iyaml_fn, iyaml)
            elif i == 3:
                # clients[3] runs a webserver and uses a helper
                config += nodeconfig
                config += "web.port = tcp:0:interface=127.0.0.1\n"
                config += "timeout.disconnect = 1800\n"
            else:
                config += nodeconfig

            fileutil.write(os.path.join(basedir, 'tahoe.cfg'), config)

        # give subclasses a chance to append lines to the node's tahoe.cfg
        # files before they are launched.
        self._set_up_nodes_extra_config()

        # start clients[0], wait for it's tub to be ready (at which point it
        # will have registered the helper furl).
        c = self.add_service(client.Client(basedir=basedirs[0]))
        self.clients.append(c)
        c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)

        f = open(os.path.join(basedirs[0], "private", "helper.furl"), "r")
        helper_furl = f.read()
        f.close()
        self.helper_furl = helper_furl
        if self.numclients >= 4:
            # point clients[3] at clients[0]'s helper
            f = open(os.path.join(basedirs[3], 'tahoe.cfg'), 'ab+')
            f.write(
                "[client]\n"
                "helper.furl = %s\n" % helper_furl)
            f.close()

        # this starts the rest of the clients
        for i in range(1, self.numclients):
            c = self.add_service(client.Client(basedir=basedirs[i]))
            self.clients.append(c)
            c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
        log.msg("STARTING")
        d = self.wait_for_connections()
        def _connected(res):
            log.msg("CONNECTED")
            # now find out where the web port was
            self.webish_url = self.clients[0].getServiceNamed("webish").getURL()
            if self.numclients >= 4:
                # and the helper-using webport
                self.helper_webish_url = self.clients[3].getServiceNamed("webish").getURL()
        d.addCallback(_connected)
        return d

    def _set_up_nodes_extra_config(self):
        # for overriding by subclasses
        pass

    def _grab_stats(self, res):
        # Trigger one stats-gatherer polling pass; returns its Deferred.
        d = self.stats_gatherer.poll()
        return d

    def bounce_client(self, num):
        """Stop client #num and replace it with a fresh Client on the same
        basedir; returns a Deferred that fires when the grid reconnects."""
        c = self.clients[num]
        d = c.disownServiceParent()
        # I think windows requires a moment to let the connection really stop
        # and the port number made available for re-use. TODO: examine the
        # behavior, see if this is really the problem, see if we can do
        # better than blindly waiting for a second.
        d.addCallback(self.stall, 1.0)
        def _stopped(res):
            new_c = client.Client(basedir=self.getdir("client%d" % num))
            self.clients[num] = new_c
            new_c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
            self.add_service(new_c)
        d.addCallback(_stopped)
        d.addCallback(lambda res: self.wait_for_connections())
        def _maybe_get_webport(res):
            if num == 0:
                # now find out where the web port was
                self.webish_url = self.clients[0].getServiceNamed("webish").getURL()
        d.addCallback(_maybe_get_webport)
        return d

    def add_extra_node(self, client_num, helper_furl=None,
                       add_to_sparent=False):
        # usually this node is *not* parented to our self.sparent, so we can
        # shut it down separately from the rest, to exercise the
        # connection-lost code
        basedir = self.getdir("client%d" % client_num)
        if not os.path.isdir(basedir):
            fileutil.make_dirs(basedir)
        config = "[client]\n"
        config += "introducer.furl = %s\n" % self.introducer_furl
        if helper_furl:
            config += "helper.furl = %s\n" % helper_furl
        fileutil.write(os.path.join(basedir, 'tahoe.cfg'), config)

        c = client.Client(basedir=basedir)
        self.clients.append(c)
        c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
        self.numclients += 1
        if add_to_sparent:
            c.setServiceParent(self.sparent)
        else:
            c.startService()
        d = self.wait_for_connections()
        d.addCallback(lambda res: c)
        return d

    def _check_connections(self):
        # Poll predicate: True once every client sees the introducer, all
        # storage servers, and (when configured with one) its helper.
        for c in self.clients:
            if not c.connected_to_introducer():
                return False
            sb = c.get_storage_broker()
            if len(sb.get_connected_servers()) != self.numclients:
                return False
            up = c.getServiceNamed("uploader")
            if up._helper_furl and not up._helper:
                return False
        return True

    def wait_for_connections(self, ignored=None):
        # Generous timeout: full-grid connection can be slow on loaded hosts.
        return self.poll(self._check_connections, timeout=200)
2008-02-08 00:27:30 +00:00
class CountingDataUploadable(upload.Data):
    """upload.Data that tracks how many bytes have been read from it.

    When interrupt_after is set, the Deferred in interrupt_after_d is
    fired (exactly once) as soon as the running byte count passes that
    threshold, letting a test interrupt an upload mid-stream.
    """
    bytes_read = 0            # running total of bytes handed out by read()
    interrupt_after = None    # byte threshold; None disables the trigger
    interrupt_after_d = None  # Deferred fired when the threshold is crossed

    def read(self, length):
        self.bytes_read += length
        threshold = self.interrupt_after
        if threshold is not None and self.bytes_read > threshold:
            # Disarm before firing so the callback runs only once.
            self.interrupt_after = None
            self.interrupt_after_d.callback(self)
        return upload.Data.read(self, length)
2011-01-20 04:32:38 +00:00
class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
    """End-to-end tests that exercise a small local grid: real client nodes,
    an introducer, storage servers, and (in some tests) an upload helper."""

    timeout = 3600 # It takes longer than 960 seconds on Zandr's ARM box.
2006-12-03 02:37:31 +00:00
2006-12-03 03:32:08 +00:00
def test_connections ( self ) :
2007-06-28 18:00:03 +00:00
self . basedir = " system/SystemTest/test_connections "
2006-12-03 07:56:27 +00:00
d = self . set_up_nodes ( )
2007-03-27 23:12:11 +00:00
self . extra_node = None
2007-12-03 21:52:42 +00:00
d . addCallback ( lambda res : self . add_extra_node ( self . numclients ) )
2007-01-10 02:40:36 +00:00
def _check ( extra_node ) :
2007-01-16 22:12:49 +00:00
self . extra_node = extra_node
2006-12-03 03:32:08 +00:00
for c in self . clients :
2009-06-23 02:10:47 +00:00
all_peerids = c . get_storage_broker ( ) . get_all_serverids ( )
2007-12-03 21:52:42 +00:00
self . failUnlessEqual ( len ( all_peerids ) , self . numclients + 1 )
2009-06-01 21:06:04 +00:00
sb = c . storage_broker
2011-02-21 01:58:04 +00:00
permuted_peers = sb . get_servers_for_psi ( " a " )
2007-12-03 21:52:42 +00:00
self . failUnlessEqual ( len ( permuted_peers ) , self . numclients + 1 )
2007-08-12 23:29:34 +00:00
2006-12-03 03:32:08 +00:00
d . addCallback ( _check )
2007-01-16 22:12:49 +00:00
def _shutdown_extra_node ( res ) :
2007-03-27 23:12:11 +00:00
if self . extra_node :
2007-04-04 23:09:13 +00:00
return self . extra_node . stopService ( )
2007-03-27 23:12:11 +00:00
return res
2007-01-16 22:12:49 +00:00
d . addBoth ( _shutdown_extra_node )
2006-12-03 03:32:08 +00:00
return d
2007-04-18 23:06:57 +00:00
# test_connections is subsumed by test_upload_and_download, and takes
# quite a while to run on a slow machine (because of all the TLS
# connections that must be established). If we ever rework the introducer
# code to such an extent that we're not sure if it works anymore, we can
# reinstate this test until it does.
del test_connections
2006-12-03 03:32:08 +00:00
2008-01-30 19:24:50 +00:00
def test_upload_and_download_random_key ( self ) :
2008-03-04 06:55:58 +00:00
self . basedir = " system/SystemTest/test_upload_and_download_random_key "
2008-03-24 16:46:06 +00:00
return self . _test_upload_and_download ( convergence = None )
2008-01-30 19:24:50 +00:00
2008-03-24 16:46:06 +00:00
def test_upload_and_download_convergent ( self ) :
self . basedir = " system/SystemTest/test_upload_and_download_convergent "
return self . _test_upload_and_download ( convergence = " some convergence string " )
2008-01-30 19:24:50 +00:00
2008-03-24 16:46:06 +00:00
    def _test_upload_and_download(self, convergence):
        """Core immutable-file system test, parameterized by convergence.

        Exercises, in sequence: grid connectivity; a segmented upload;
        re-upload of the same data; full and partial downloads; download of
        a corrupted cap and of a nonexistent URI (both must fail with
        NoSharesError); upload through a helper node, including duplicate
        detection (convergent mode only) and resumption after the helper is
        bounced mid-upload; and finally the remote stats interface.
        Returns the Deferred for the whole chain.
        """
        # we use 4000 bytes of data, which will result in about 400k written
        # to disk among all our simulated nodes
        DATA = "Some data to upload\n" * 200
        d = self.set_up_nodes()
        def _check_connections(res):
            # every client should see the complete grid before we upload
            for c in self.clients:
                c.encoding_params['happy'] = 5
                all_peerids = c.get_storage_broker().get_all_serverids()
                self.failUnlessEqual(len(all_peerids), self.numclients)
                sb = c.storage_broker
                permuted_peers = sb.get_servers_for_psi("a")
                self.failUnlessEqual(len(permuted_peers), self.numclients)
        d.addCallback(_check_connections)

        def _do_upload(res):
            log.msg("UPLOADING")
            u = self.clients[0].getServiceNamed("uploader")
            self.uploader = u
            # we crank the max segsize down to 1024b for the duration of this
            # test, so we can exercise multiple segments. It is important
            # that this is not a multiple of the segment size, so that the
            # tail segment is not the same length as the others. This actualy
            # gets rounded up to 1025 to be a multiple of the number of
            # required shares (since we use 25 out of 100 FEC).
            up = upload.Data(DATA, convergence=convergence)
            up.max_segment_size = 1024
            d1 = u.upload(up)
            return d1
        d.addCallback(_do_upload)
        def _upload_done(results):
            theuri = results.get_uri()
            log.msg("upload finished: uri is %s" % (theuri,))
            self.uri = theuri
            assert isinstance(self.uri, str), self.uri
            self.cap = uri.from_string(self.uri)
            # download through a *different* client than the uploader
            self.n = self.clients[1].create_node_from_uri(self.uri)
        d.addCallback(_upload_done)

        def _upload_again(res):
            # Upload again. If using convergent encryption then this ought to be
            # short-circuited, however with the way we currently generate URIs
            # (i.e. because they include the roothash), we have to do all of the
            # encoding work, and only get to save on the upload part.
            log.msg("UPLOADING AGAIN")
            up = upload.Data(DATA, convergence=convergence)
            up.max_segment_size = 1024
            return self.uploader.upload(up)
        d.addCallback(_upload_again)

        def _download_to_data(res):
            log.msg("DOWNLOADING")
            return download_to_data(self.n)
        d.addCallback(_download_to_data)
        def _download_to_data_done(data):
            log.msg("download finished")
            self.failUnlessEqual(data, DATA)
        d.addCallback(_download_to_data_done)

        def _test_read(res):
            # full read, offset+size read, open-ended tail read, and an
            # over-long read (which must be truncated to the file size)
            n = self.clients[1].create_node_from_uri(self.uri)
            d = download_to_data(n)
            def _read_done(data):
                self.failUnlessEqual(data, DATA)
            d.addCallback(_read_done)
            d.addCallback(lambda ign:
                          n.read(MemoryConsumer(), offset=1, size=4))
            def _read_portion_done(mc):
                self.failUnlessEqual("".join(mc.chunks), DATA[1:1+4])
            d.addCallback(_read_portion_done)
            d.addCallback(lambda ign:
                          n.read(MemoryConsumer(), offset=2, size=None))
            def _read_tail_done(mc):
                self.failUnlessEqual("".join(mc.chunks), DATA[2:])
            d.addCallback(_read_tail_done)
            d.addCallback(lambda ign:
                          n.read(MemoryConsumer(), size=len(DATA)+1000))
            def _read_too_much(mc):
                self.failUnlessEqual("".join(mc.chunks), DATA)
            d.addCallback(_read_too_much)

            return d
        d.addCallback(_test_read)

        def _test_bad_read(res):
            # flip a bit in the readkey: the storage index changes, so no
            # shares will be found
            bad_u = uri.from_string_filenode(self.uri)
            bad_u.key = self.flip_bit(bad_u.key)
            bad_n = self.clients[1].create_node_from_uri(bad_u.to_string())
            # this should cause an error during download
            d = self.shouldFail2(NoSharesError, "'download bad node'",
                                 None,
                                 bad_n.read, MemoryConsumer(), offset=2)
            return d
        d.addCallback(_test_bad_read)

        def _download_nonexistent_uri(res):
            baduri = self.mangle_uri(self.uri)
            badnode = self.clients[1].create_node_from_uri(baduri)
            log.msg("about to download non-existent URI", level=log.UNUSUAL,
                    facility="tahoe.tests")
            d1 = download_to_data(badnode)
            def _baduri_should_fail(res):
                log.msg("finished downloading non-existent URI",
                        level=log.UNUSUAL, facility="tahoe.tests")
                self.failUnless(isinstance(res, Failure))
                self.failUnless(res.check(NoSharesError),
                                "expected NoSharesError, got %s" % res)
            d1.addBoth(_baduri_should_fail)
            return d1
        d.addCallback(_download_nonexistent_uri)

        # add a new node, which doesn't accept shares, and only uses the
        # helper for upload.
        d.addCallback(lambda res: self.add_extra_node(self.numclients,
                                                      self.helper_furl,
                                                      add_to_sparent=True))
        def _added(extra_node):
            self.extra_node = extra_node
            self.extra_node.encoding_params['happy'] = 5
        d.addCallback(_added)

        def _has_helper():
            # poll predicate: the extra node's uploader is connected to the
            # helper
            uploader = self.extra_node.getServiceNamed("uploader")
            furl, connected = uploader.get_helper_info()
            return connected
        d.addCallback(lambda ign: self.poll(_has_helper))

        HELPER_DATA = "Data that needs help to upload" * 1000
        def _upload_with_helper(res):
            u = upload.Data(HELPER_DATA, convergence=convergence)
            d = self.extra_node.upload(u)
            def _uploaded(results):
                n = self.clients[1].create_node_from_uri(results.get_uri())
                return download_to_data(n)
            d.addCallback(_uploaded)
            def _check(newdata):
                self.failUnlessEqual(newdata, HELPER_DATA)
            d.addCallback(_check)
            return d
        d.addCallback(_upload_with_helper)

        def _upload_duplicate_with_helper(res):
            # a second upload of the same (convergent) data must be detected
            # by the helper before any ciphertext is transferred
            u = upload.Data(HELPER_DATA, convergence=convergence)
            u.debug_stash_RemoteEncryptedUploadable = True
            d = self.extra_node.upload(u)
            def _uploaded(results):
                n = self.clients[1].create_node_from_uri(results.get_uri())
                return download_to_data(n)
            d.addCallback(_uploaded)
            def _check(newdata):
                self.failUnlessEqual(newdata, HELPER_DATA)
                self.failIf(hasattr(u, "debug_RemoteEncryptedUploadable"),
                            "uploadable started uploading, should have been avoided")
            d.addCallback(_check)
            return d
        if convergence is not None:
            d.addCallback(_upload_duplicate_with_helper)

        d.addCallback(fireEventually)

        def _upload_resumable(res):
            DATA = "Data that needs help to upload and gets interrupted" * 1000
            u1 = CountingDataUploadable(DATA, convergence=convergence)
            u2 = CountingDataUploadable(DATA, convergence=convergence)

            # we interrupt the connection after about 5kB by shutting down
            # the helper, then restarting it.
            u1.interrupt_after = 5000
            u1.interrupt_after_d = defer.Deferred()
            bounced_d = defer.Deferred()
            def _do_bounce(res):
                d = self.bounce_client(0)
                d.addBoth(bounced_d.callback)
            u1.interrupt_after_d.addCallback(_do_bounce)

            # sneak into the helper and reduce its chunk size, so that our
            # debug_interrupt will sever the connection on about the fifth
            # chunk fetched. This makes sure that we've started to write the
            # new shares before we abandon them, which exercises the
            # abort/delete-partial-share code. TODO: find a cleaner way to do
            # this. I know that this will affect later uses of the helper in
            # this same test run, but I'm not currently worried about it.
            offloaded.CHKCiphertextFetcher.CHUNK_SIZE = 1000

            upload_d = self.extra_node.upload(u1)
            # The upload will start, and bounce_client() will be called after
            # about 5kB. bounced_d will fire after bounce_client() finishes
            # shutting down and restarting the node.
            d = bounced_d
            def _bounced(ign):
                # By this point, the upload should have failed because of the
                # interruption. upload_d will fire in a moment
                def _should_not_finish(res):
                    self.fail("interrupted upload should have failed, not"
                              " finished with result %s" % (res,))
                def _interrupted(f):
                    f.trap(DeadReferenceError)
                    # make sure we actually interrupted it before finishing
                    # the file
                    self.failUnless(u1.bytes_read < len(DATA),
                                    "read %d out of %d total" %
                                    (u1.bytes_read, len(DATA)))
                upload_d.addCallbacks(_should_not_finish, _interrupted)
                return upload_d
            d.addCallback(_bounced)

            def _disconnected(res):
                # check to make sure the storage servers aren't still hanging
                # on to the partial share: their incoming/ directories should
                # now be empty.
                log.msg("disconnected", level=log.NOISY,
                        facility="tahoe.test.test_system")
                for i in range(self.numclients):
                    incdir = os.path.join(self.getdir("client%d" % i),
                                          "storage", "shares", "incoming")
                    self.failIf(os.path.exists(incdir) and os.listdir(incdir))
            d.addCallback(_disconnected)

            d.addCallback(lambda res:
                          log.msg("wait_for_helper", level=log.NOISY,
                                  facility="tahoe.test.test_system"))
            # then we need to wait for the extra node to reestablish its
            # connection to the helper.
            d.addCallback(lambda ign: self.poll(_has_helper))

            d.addCallback(lambda res:
                          log.msg("uploading again", level=log.NOISY,
                                  facility="tahoe.test.test_system"))
            d.addCallback(lambda res: self.extra_node.upload(u2))

            def _uploaded(results):
                cap = results.get_uri()
                log.msg("Second upload complete", level=log.NOISY,
                        facility="tahoe.test.test_system")

                # this is really bytes received rather than sent, but it's
                # convenient and basically measures the same thing
                bytes_sent = results.get_ciphertext_fetched()
                self.failUnless(isinstance(bytes_sent, (int, long)), bytes_sent)

                # We currently don't support resumption of upload if the data is
                # encrypted with a random key. (Because that would require us
                # to store the key locally and re-use it on the next upload of
                # this file, which isn't a bad thing to do, but we currently
                # don't do it.)
                if convergence is not None:
                    # Make sure we did not have to read the whole file the
                    # second time around.
                    self.failUnless(bytes_sent < len(DATA),
                                    "resumption didn't save us any work:"
                                    " read %r bytes out of %r total" %
                                    (bytes_sent, len(DATA)))
                else:
                    # Make sure we did have to read the whole file the second
                    # time around -- because the one that we partially uploaded
                    # earlier was encrypted with a different random key.
                    self.failIf(bytes_sent < len(DATA),
                                "resumption saved us some work even though we were using random keys:"
                                " read %r bytes out of %r total" %
                                (bytes_sent, len(DATA)))
                n = self.clients[1].create_node_from_uri(cap)
                return download_to_data(n)
            d.addCallback(_uploaded)

            def _check(newdata):
                self.failUnlessEqual(newdata, DATA)
                # If using convergent encryption, then also check that the
                # helper has removed the temp file from its directories.
                if convergence is not None:
                    basedir = os.path.join(self.getdir("client0"), "helper")
                    files = os.listdir(os.path.join(basedir, "CHK_encoding"))
                    self.failUnlessEqual(files, [])
                    files = os.listdir(os.path.join(basedir, "CHK_incoming"))
                    self.failUnlessEqual(files, [])
            d.addCallback(_check)
            return d
        d.addCallback(_upload_resumable)

        def _grab_stats(ignored):
            # the StatsProvider doesn't normally publish a FURL:
            # instead it passes a live reference to the StatsGatherer
            # (if and when it connects). To exercise the remote stats
            # interface, we manually publish client0's StatsProvider
            # and use client1 to query it.
            sp = self.clients[0].stats_provider
            sp_furl = self.clients[0].tub.registerReference(sp)
            d = self.clients[1].tub.getReference(sp_furl)
            d.addCallback(lambda sp_rref: sp_rref.callRemote("get_stats"))
            def _got_stats(stats):
                #print "STATS"
                #from pprint import pprint
                #pprint(stats)
                s = stats["stats"]
                self.failUnlessEqual(s["storage_server.accepting_immutable_shares"], 1)
                c = stats["counters"]
                self.failUnless("storage_server.allocate" in c)
            d.addCallback(_got_stats)
            return d
        d.addCallback(_grab_stats)

        return d
2006-12-03 07:56:27 +00:00
2010-07-19 04:50:47 +00:00
def _find_all_shares ( self , basedir ) :
2007-11-14 06:08:15 +00:00
shares = [ ]
for ( dirpath , dirnames , filenames ) in os . walk ( basedir ) :
if " storage " not in dirpath :
continue
if not filenames :
continue
pieces = dirpath . split ( os . sep )
2009-02-21 07:17:10 +00:00
if ( len ( pieces ) > = 5
and pieces [ - 4 ] == " storage "
and pieces [ - 3 ] == " shares " ) :
2008-01-31 23:26:28 +00:00
# we're sitting in .../storage/shares/$START/$SINDEX , and there
2007-11-14 06:08:15 +00:00
# are sharefiles here
2008-01-31 23:26:28 +00:00
assert pieces [ - 5 ] . startswith ( " client " )
client_num = int ( pieces [ - 5 ] [ - 1 ] )
2007-11-14 06:08:15 +00:00
storage_index_s = pieces [ - 1 ]
2009-02-18 21:46:55 +00:00
storage_index = si_a2b ( storage_index_s )
2007-11-14 06:08:15 +00:00
for sharename in filenames :
shnum = int ( sharename )
filename = os . path . join ( dirpath , sharename )
data = ( client_num , storage_index , filename , shnum )
shares . append ( data )
if not shares :
self . fail ( " unable to find any share files in %s " % basedir )
return shares
    def _corrupt_mutable_share(self, filename, which):
        """Deliberately corrupt one field of the SDMF mutable share stored
        in `filename`.

        `which` names the field to damage: "seqnum", "R" (root hash), "IV",
        "segsize", "pubkey", "signature", "share_hash_chain",
        "block_hash_tree", "share_data", or "encprivkey".  The share is
        unpacked, the chosen field is perturbed (bit-flip, or +15 for the
        integer fields), then the share is re-packed and written back in
        place.
        """
        msf = MutableShareFile(filename)
        # read far more than any test share can hold, so we get all of it
        datav = msf.readv([ (0, 1000000) ])
        final_share = datav[0]
        assert len(final_share) < 1000000 # ought to be truncated
        pieces = mutable_layout.unpack_share(final_share)
        (seqnum, root_hash, IV, k, N, segsize, datalen,
         verification_key, signature, share_hash_chain, block_hash_tree,
         share_data, enc_privkey) = pieces

        if which == "seqnum":
            seqnum = seqnum + 15
        elif which == "R":
            root_hash = self.flip_bit(root_hash)
        elif which == "IV":
            IV = self.flip_bit(IV)
        elif which == "segsize":
            segsize = segsize + 15
        elif which == "pubkey":
            verification_key = self.flip_bit(verification_key)
        elif which == "signature":
            signature = self.flip_bit(signature)
        elif which == "share_hash_chain":
            nodenum = share_hash_chain.keys()[0]
            share_hash_chain[nodenum] = self.flip_bit(share_hash_chain[nodenum])
        elif which == "block_hash_tree":
            block_hash_tree[-1] = self.flip_bit(block_hash_tree[-1])
        elif which == "share_data":
            share_data = self.flip_bit(share_data)
        elif which == "encprivkey":
            enc_privkey = self.flip_bit(enc_privkey)

        # re-pack with the (possibly modified) fields and overwrite the
        # original share on disk
        prefix = mutable_layout.pack_prefix(seqnum, root_hash, IV, k, N,
                                            segsize, datalen)
        final_share = mutable_layout.pack_share(prefix,
                                                verification_key,
                                                signature,
                                                share_hash_chain,
                                                block_hash_tree,
                                                share_data,
                                                enc_privkey)
        msf.writev([(0, final_share)], None)
2007-11-07 01:57:11 +00:00
2008-07-25 22:13:00 +00:00
2007-11-07 01:57:11 +00:00
    def test_mutable(self):
        """System test for SDMF mutable files.

        Creates a mutable file, inspects its share with 'tahoe debug
        dump-share', retrieves it through cached and fresh nodes on several
        clients, overwrites it twice, corrupts 7 of the 10 shares (leaving
        k=3 intact) and verifies the download still succeeds, checks
        zero-length mutable files, and finishes with basic dirnode
        operations.
        """
        self.basedir = "system/SystemTest/test_mutable"
        DATA = "initial contents go here."  # 25 bytes % 3 != 0
        DATA_uploadable = MutableData(DATA)
        NEWDATA = "new contents yay"
        NEWDATA_uploadable = MutableData(NEWDATA)
        NEWERDATA = "this is getting old"
        NEWERDATA_uploadable = MutableData(NEWERDATA)

        d = self.set_up_nodes()

        def _create_mutable(res):
            c = self.clients[0]
            log.msg("starting create_mutable_file")
            d1 = c.create_mutable_file(DATA_uploadable)
            def _done(res):
                log.msg("DONE: %s" % (res,))
                self._mutable_node_1 = res
            d1.addCallback(_done)
            return d1
        d.addCallback(_create_mutable)

        @defer.inlineCallbacks
        def _test_debug(res):
            # find a share. It is important to run this while there is only
            # one slot in the grid.
            shares = self._find_all_shares(self.basedir)
            (client_num, storage_index, filename, shnum) = shares[0]
            log.msg("test_system.SystemTest.test_mutable._test_debug using %s"
                    % filename)
            log.msg("for clients[%d]" % client_num)

            rc, output, err = yield run_cli("debug", "dump-share", "--offsets",
                                            filename)
            self.failUnlessEqual(rc, 0)
            try:
                self.failUnless("Mutable slot found:\n" in output)
                self.failUnless("share_type: SDMF\n" in output)
                peerid = idlib.nodeid_b2a(self.clients[client_num].nodeid)
                self.failUnless(" WE for nodeid: %s\n" % peerid in output)
                self.failUnless(" num_extra_leases: 0\n" in output)
                self.failUnless(" secrets are for nodeid: %s\n" % peerid
                                in output)
                self.failUnless(" SDMF contents:\n" in output)
                self.failUnless("  seqnum: 1\n" in output)
                self.failUnless("  required_shares: 3\n" in output)
                self.failUnless("  total_shares: 10\n" in output)
                self.failUnless("  segsize: 27\n" in output, (output, filename))
                self.failUnless("  datalen: 25\n" in output)
                # the exact share_hash_chain nodes depends upon the sharenum,
                # and is more of a hassle to compute than I want to deal with
                # now
                self.failUnless("  share_hash_chain: " in output)
                self.failUnless("  block_hash_tree: 1 nodes\n" in output)
                expected = ("  verify-cap: URI:SSK-Verifier:%s:" %
                            base32.b2a(storage_index))
                self.failUnless(expected in output)
            except unittest.FailTest:
                print
                print "dump-share output was:"
                print output
                raise
        d.addCallback(_test_debug)

        # test retrieval
        # first, let's see if we can use the existing node to retrieve the
        # contents. This allows it to use the cached pubkey and maybe the
        # latest-known sharemap.
        d.addCallback(lambda res: self._mutable_node_1.download_best_version())
        def _check_download_1(res):
            self.failUnlessEqual(res, DATA)
            # now we see if we can retrieve the data from a new node,
            # constructed using the URI of the original one. We do this test
            # on the same client that uploaded the data.
            uri = self._mutable_node_1.get_uri()
            log.msg("starting retrieve1")
            newnode = self.clients[0].create_node_from_uri(uri)
            newnode_2 = self.clients[0].create_node_from_uri(uri)
            # the nodemaker must cache: same URI -> the very same node object
            self.failUnlessIdentical(newnode, newnode_2)
            return newnode.download_best_version()
        d.addCallback(_check_download_1)
        def _check_download_2(res):
            self.failUnlessEqual(res, DATA)
            # same thing, but with a different client
            uri = self._mutable_node_1.get_uri()
            newnode = self.clients[1].create_node_from_uri(uri)
            log.msg("starting retrieve2")
            d1 = newnode.download_best_version()
            d1.addCallback(lambda res: (res, newnode))
            return d1
        d.addCallback(_check_download_2)
        def _check_download_3((res, newnode)):
            self.failUnlessEqual(res, DATA)
            # replace the data
            log.msg("starting replace1")
            d1 = newnode.overwrite(NEWDATA_uploadable)
            d1.addCallback(lambda res: newnode.download_best_version())
            return d1
        d.addCallback(_check_download_3)
        def _check_download_4(res):
            self.failUnlessEqual(res, NEWDATA)
            # now create an even newer node and replace the data on it. This
            # new node has never been used for download before.
            uri = self._mutable_node_1.get_uri()
            newnode1 = self.clients[2].create_node_from_uri(uri)
            newnode2 = self.clients[3].create_node_from_uri(uri)
            self._newnode3 = self.clients[3].create_node_from_uri(uri)
            log.msg("starting replace2")
            d1 = newnode1.overwrite(NEWERDATA_uploadable)
            d1.addCallback(lambda res: newnode2.download_best_version())
            return d1
        d.addCallback(_check_download_4)
        def _check_download_5(res):
            log.msg("finished replace2")
            self.failUnlessEqual(res, NEWERDATA)
        d.addCallback(_check_download_5)
        def _corrupt_shares(res):
            # run around and flip bits in all but k of the shares, to test
            # the hash checks
            shares = self._find_all_shares(self.basedir)
            ## sort by share number
            #shares.sort( lambda a,b: cmp(a[3], b[3]) )
            where = dict([ (shnum, filename)
                           for (client_num, storage_index, filename, shnum)
                           in shares ])
            assert len(where) == 10 # this test is designed for 3-of-10
            for shnum, filename in where.items():
                # shares 7,8,9 are left alone. read will check
                # (share_hash_chain, block_hash_tree, share_data). New
                # seqnum+R pairs will trigger a check of (seqnum, R, IV,
                # segsize, signature).
                if shnum == 0:
                    # read: this will trigger "pubkey doesn't match
                    # fingerprint".
                    self._corrupt_mutable_share(filename, "pubkey")
                    self._corrupt_mutable_share(filename, "encprivkey")
                elif shnum == 1:
                    # triggers "signature is invalid"
                    self._corrupt_mutable_share(filename, "seqnum")
                elif shnum == 2:
                    # triggers "signature is invalid"
                    self._corrupt_mutable_share(filename, "R")
                elif shnum == 3:
                    # triggers "signature is invalid"
                    self._corrupt_mutable_share(filename, "segsize")
                elif shnum == 4:
                    self._corrupt_mutable_share(filename, "share_hash_chain")
                elif shnum == 5:
                    self._corrupt_mutable_share(filename, "block_hash_tree")
                elif shnum == 6:
                    self._corrupt_mutable_share(filename, "share_data")
                # other things to correct: IV, signature
                # 7,8,9 are left alone

                # note that initial_query_count=5 means that we'll hit the
                # first 5 servers in effectively random order (based upon
                # response time), so we won't necessarily ever get a "pubkey
                # doesn't match fingerprint" error (if we hit shnum>=1 before
                # shnum=0, we pull the pubkey from there). To get repeatable
                # specific failures, we need to set initial_query_count=1,
                # but of course that will change the sequencing behavior of
                # the retrieval process. TODO: find a reasonable way to make
                # this a parameter, probably when we expand this test to test
                # for one failure mode at a time.

                # when we retrieve this, we should get three signature
                # failures (where we've mangled seqnum, R, and segsize). The
                # pubkey mangling
        d.addCallback(_corrupt_shares)

        # with 7 of 10 shares corrupted, the intact k=3 shares must still be
        # enough to recover the latest contents
        d.addCallback(lambda res: self._newnode3.download_best_version())
        d.addCallback(_check_download_5)
        def _check_empty_file(res):
            # make sure we can create empty files, this usually screws up the
            # segsize math
            d1 = self.clients[2].create_mutable_file(MutableData(""))
            d1.addCallback(lambda newnode: newnode.download_best_version())
            d1.addCallback(lambda res: self.failUnlessEqual("", res))
            return d1
        d.addCallback(_check_empty_file)

        d.addCallback(lambda res: self.clients[0].create_dirnode())
        def _created_dirnode(dnode):
            log.msg("_created_dirnode(%s)" % (dnode,))
            d1 = dnode.list()
            d1.addCallback(lambda children: self.failUnlessEqual(children, {}))
            d1.addCallback(lambda res: dnode.has_child(u"edgar"))
            d1.addCallback(lambda answer: self.failUnlessEqual(answer, False))
            d1.addCallback(lambda res: dnode.set_node(u"see recursive", dnode))
            d1.addCallback(lambda res: dnode.has_child(u"see recursive"))
            d1.addCallback(lambda answer: self.failUnlessEqual(answer, True))
            d1.addCallback(lambda res: dnode.build_manifest().when_done())
            d1.addCallback(lambda res:
                           self.failUnlessEqual(len(res["manifest"]), 1))
            return d1
        d.addCallback(_created_dirnode)

        return d
2007-04-19 01:29:10 +00:00
def flip_bit(self, good):
    """Return *good* with the lowest bit of its final byte inverted.

    Flipping the same string twice restores the original.
    """
    prefix, last = good[:-1], good[-1]
    return prefix + chr(ord(last) ^ 0x01)
def mangle_uri(self, gooduri):
    """Return a CHK URI identical to *gooduri* except for a flipped key bit.

    Changing the key changes the storage index, which means we'll be
    asking about the wrong file, so nobody will have any shares.
    """
    parsed = uri.from_string(gooduri)
    mangled = uri.CHKFileURI(key=self.flip_bit(parsed.key),
                             uri_extension_hash=parsed.uri_extension_hash,
                             needed_shares=parsed.needed_shares,
                             total_shares=parsed.total_shares,
                             size=parsed.size)
    return mangled.to_string()
2007-04-19 01:29:10 +00:00
2007-06-08 22:59:16 +00:00
# TODO: add a test which mangles the uri_extension_hash instead, and
# should fail due to not being able to get a valid uri_extension block.
# Also a test which sneakily mangles the uri_extension block to change
# some of the validation data, so it will fail in the post-download phase
# when the file's crypttext integrity check fails. Do the same thing for
# the key, which should cause the download to fail the post-download
# plaintext_hash check.
2010-01-14 20:11:19 +00:00
def test_filesystem(self):
    """End-to-end exercise of the virtual filesystem.

    Brings up a grid of nodes (with a stats gatherer), publishes a small
    directory tree, bounces a client, and then re-checks the tree through
    several different APIs (node-iterative, path-based, web, CLI, checker).
    """
    self.basedir = "system/SystemTest/test_filesystem"
    self.data = LARGE_DATA
    d = self.set_up_nodes(use_stats_gatherer=True)
    def _new_happy_semantics(ign):
        # relax servers-of-happiness so uploads succeed on a small grid
        for c in self.clients:
            c.encoding_params['happy'] = 1
    d.addCallback(_new_happy_semantics)
    d.addCallback(self._test_introweb)
    d.addCallback(self.log, "starting publish")
    d.addCallback(self._do_publish1)
    d.addCallback(self._test_runner)
    d.addCallback(self._do_publish2)
    # at this point, we have the following filesystem (where "R" denotes
    # self._root_directory_uri):
    # R
    # R/subdir1
    # R/subdir1/mydata567
    # R/subdir1/subdir2/
    # R/subdir1/subdir2/mydata992

    # restart client0 to make sure published data survives a bounce
    d.addCallback(lambda res: self.bounce_client(0))
    d.addCallback(self.log, "bounced client0")
    d.addCallback(self._check_publish1)
    d.addCallback(self.log, "did _check_publish1")
    d.addCallback(self._check_publish2)
    d.addCallback(self.log, "did _check_publish2")
    d.addCallback(self._do_publish_private)
    d.addCallback(self.log, "did _do_publish_private")
    # now we also have (where "P" denotes a new dir):
    #  P/personal/sekrit data
    #  P/s2-rw -> /subdir1/subdir2/
    #  P/s2-ro -> /subdir1/subdir2/ (read-only)
    d.addCallback(self._check_publish_private)
    d.addCallback(self.log, "did _check_publish_private")
    d.addCallback(self._test_web)
    d.addCallback(self._test_control)
    d.addCallback(self._test_cli)
    # P now has four top-level children:
    # P/personal/sekrit data
    # P/s2-ro/
    # P/s2-rw/
    # P/test_put/ (empty)
    d.addCallback(self._test_checker)
    return d
2008-03-12 00:36:25 +00:00
def _test_introweb(self, res):
    """Fetch the introducer's welcome page (HTML and JSON) and sanity-check it."""
    d = do_http("get", self.introweb_url)
    def _check(res):
        try:
            self.failUnless("%s: %s" % (allmydata.__appname__,
                                        allmydata.__version__) in res)
            verstr = str(allmydata.__version__)

            # The Python "rational version numbering" convention
            # disallows "-r$REV" but allows ".post$REV"
            # instead. Eventually we'll probably move to
            # that. When we do, this test won't go red:
            ix = verstr.rfind('-r')
            if ix != -1:
                altverstr = verstr[:ix] + '.post' + verstr[ix+2:]
            else:
                ix = verstr.rfind('.post')
                if ix != -1:
                    altverstr = verstr[:ix] + '-r' + verstr[ix+5:]
                else:
                    altverstr = verstr

            appverstr = "%s: %s" % (allmydata.__appname__, verstr)
            newappverstr = "%s: %s" % (allmydata.__appname__, altverstr)

            # accept either spelling of the version string
            self.failUnless((appverstr in res) or (newappverstr in res),
                            (appverstr, newappverstr, res))
            self.failUnless("Announcement Summary: storage: 5" in res)
            self.failUnless("Subscription Summary: storage: 5" in res)
            self.failUnless("tahoe.css" in res)
        except unittest.FailTest:
            # dump the page on failure to make diagnosis possible
            print
            print "GET %s output was:" % self.introweb_url
            print res
            raise
    d.addCallback(_check)
    # make sure it serves the CSS too
    d.addCallback(lambda res: do_http("get", self.introweb_url + "tahoe.css"))
    d.addCallback(lambda res: do_http("get", self.introweb_url + "?t=json"))
    def _check_json(res):
        # the machine-readable variant must agree with the HTML page
        data = json.loads(res)
        try:
            self.failUnlessEqual(data["subscription_summary"],
                                 {"storage": 5})
            self.failUnlessEqual(data["announcement_summary"],
                                 {"storage": 5})
        except unittest.FailTest:
            print
            print "GET %s?t=json output was:" % self.introweb_url
            print res
            raise
    d.addCallback(_check_json)
    return d
2007-09-25 01:12:37 +00:00
def _do_publish1(self, res):
    """Create the root directory R and upload R/subdir1/mydata567.

    Side effects: stashes self._root_directory_uri, self._subdir1_node,
    and self.uri (the URI of the uploaded file).
    """
    ut = upload.Data(self.data, convergence=None)
    c0 = self.clients[0]
    d = c0.create_dirnode()
    def _made_root(new_dirnode):
        # remember the root URI, then re-resolve it to a node
        self._root_directory_uri = new_dirnode.get_uri()
        return c0.create_node_from_uri(self._root_directory_uri)
    d.addCallback(_made_root)
    d.addCallback(lambda root: root.create_subdirectory(u"subdir1"))
    def _made_subdir1(subdir1_node):
        self._subdir1_node = subdir1_node
        d1 = subdir1_node.add_file(u"mydata567", ut)
        d1.addCallback(self.log, "publish finished")
        def _stash_uri(filenode):
            # later steps (web download, mangling) need this URI
            self.uri = filenode.get_uri()
            assert isinstance(self.uri, str), (self.uri, filenode)
        d1.addCallback(_stash_uri)
        return d1
    d.addCallback(_made_subdir1)
    return d
2007-09-25 01:12:37 +00:00
def _do_publish2(self, res):
    """Create R/subdir1/subdir2 and upload subdir2/mydata992 into it."""
    payload = upload.Data(self.data, convergence=None)
    def _add_file(subdir2):
        return subdir2.add_file(u"mydata992", payload)
    d = self._subdir1_node.create_subdirectory(u"subdir2")
    d.addCallback(_add_file)
    return d
2008-09-08 03:03:36 +00:00
def log(self, res, *args, **kwargs):
    """Log *args/**kwargs and pass *res* through unchanged.

    Shaped for use as ``d.addCallback(self.log, "message")``: the
    Deferred's result is returned untouched so the chain continues.
    """
    log.msg(*args, **kwargs)
    return res
def _do_publish_private(self, res):
    """Build the private directory P with personal data and two links to subdir2.

    Creates P/personal/sekrit data, plus P/s2-rw (writable link to
    R/subdir1/subdir2) and P/s2-ro (read-only link to the same dir).
    Fires with the new private dirnode. Stashes self.smalldata.
    """
    self.smalldata = "sssh, very secret stuff"
    ut = upload.Data(self.smalldata, convergence=None)
    d = self.clients[0].create_dirnode()
    d.addCallback(self.log, "GOT private directory")
    def _got_new_dir(privnode):
        rootnode = self.clients[0].create_node_from_uri(self._root_directory_uri)
        d1 = privnode.create_subdirectory(u"personal")
        d1.addCallback(self.log, "made P/personal")
        d1.addCallback(lambda node: node.add_file(u"sekrit data", ut))
        d1.addCallback(self.log, "made P/personal/sekrit data")
        d1.addCallback(lambda res: rootnode.get_child_at_path([u"subdir1", u"subdir2"]))
        def _got_s2(s2node):
            # s2-rw gets the writable URI, s2-ro only the read-only one
            d2 = privnode.set_uri(u"s2-rw", s2node.get_uri(),
                                  s2node.get_readonly_uri())
            d2.addCallback(lambda node:
                           privnode.set_uri(u"s2-ro",
                                            s2node.get_readonly_uri(),
                                            s2node.get_readonly_uri()))
            return d2
        d1.addCallback(_got_s2)
        # fire with the private dirnode itself for the next pipeline stage
        d1.addCallback(lambda res: privnode)
        return d1
    d.addCallback(_got_new_dir)
    return d
def _check_publish1(self, res):
    """Walk R -> subdir1 -> mydata567 node by node and verify the contents."""
    # this one uses the iterative API
    reader = self.clients[1]
    rootnode = reader.create_node_from_uri(self._root_directory_uri)
    d = defer.succeed(rootnode)
    d.addCallback(self.log, "check_publish1 got /")
    d.addCallback(lambda root: root.get(u"subdir1"))
    d.addCallback(lambda subdir: subdir.get(u"mydata567"))
    d.addCallback(download_to_data)
    d.addCallback(self.log, "get finished")
    d.addCallback(lambda data: self.failUnlessEqual(data, self.data))
    return d
def _check_publish2(self, res):
    """Fetch subdir1/mydata567 via the path-based API and verify it.

    Also checks that re-creating a node from the file's URI yields a node
    equal to the one fetched from the directory.
    """
    # this one uses the path-based API
    rootnode = self.clients[1].create_node_from_uri(self._root_directory_uri)
    d = rootnode.get_child_at_path(u"subdir1")
    d.addCallback(lambda dirnode:
                  self.failUnless(IDirectoryNode.providedBy(dirnode)))
    d.addCallback(lambda res: rootnode.get_child_at_path(u"subdir1/mydata567"))
    d.addCallback(lambda filenode: download_to_data(filenode))
    d.addCallback(lambda data: self.failUnlessEqual(data, self.data))

    d.addCallback(lambda res: rootnode.get_child_at_path(u"subdir1/mydata567"))
    def _got_filenode(filenode):
        fnode = self.clients[1].create_node_from_uri(filenode.get_uri())
        # use a real test assertion instead of a bare 'assert': assert is
        # stripped under 'python -O' and gives no diagnostic on failure
        self.failUnlessEqual(fnode, filenode)
    d.addCallback(_got_filenode)
    return d
2008-01-04 00:02:05 +00:00
def _check_publish_private(self, resnode):
    """Verify the private tree P, read-only enforcement, moves, and deep-stats.

    resnode: the private dirnode produced by _do_publish_private; it is
    stashed as self._private_node. Checks that every mutating operation on
    the read-only s2-ro link fails with NotWriteableError, then shuffles
    'sekrit data' around and validates the manifest and deep-stats counts.
    """
    # this one uses the path-based API
    self._private_node = resnode

    d = self._private_node.get_child_at_path(u"personal")
    def _got_personal(personal):
        # remembered for the move tests below
        self._personal_node = personal
        return personal
    d.addCallback(_got_personal)

    d.addCallback(lambda dirnode:
                  self.failUnless(IDirectoryNode.providedBy(dirnode), dirnode))
    def get_path(path):
        return self._private_node.get_child_at_path(path)

    d.addCallback(lambda res: get_path(u"personal/sekrit data"))
    d.addCallback(lambda filenode: download_to_data(filenode))
    d.addCallback(lambda data: self.failUnlessEqual(data, self.smalldata))
    d.addCallback(lambda res: get_path(u"s2-rw"))
    d.addCallback(lambda dirnode: self.failUnless(dirnode.is_mutable()))
    d.addCallback(lambda res: get_path(u"s2-ro"))
    def _got_s2ro(dirnode):
        # s2-ro is a read-only view of a mutable directory
        self.failUnless(dirnode.is_mutable(), dirnode)
        self.failUnless(dirnode.is_readonly(), dirnode)
        d1 = defer.succeed(None)
        d1.addCallback(lambda res: dirnode.list())
        d1.addCallback(self.log, "dirnode.list")

        # every mutating call on the read-only node must fail
        d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "mkdir(nope)", None, dirnode.create_subdirectory, u"nope"))

        d1.addCallback(self.log, "doing add_file(ro)")
        ut = upload.Data("I will disappear, unrecorded and unobserved. The tragedy of my demise is made more poignant by its silence, but this beauty is not for you to ever know.", convergence="99i-p1x4-xd4-18yc-ywt-87uu-msu-zo -- completely and totally unguessable string (unless you read this)")
        d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "add_file(nope)", None, dirnode.add_file, u"hope", ut))

        d1.addCallback(self.log, "doing get(ro)")
        d1.addCallback(lambda res: dirnode.get(u"mydata992"))
        d1.addCallback(lambda filenode:
                       self.failUnless(IFileNode.providedBy(filenode)))

        d1.addCallback(self.log, "doing delete(ro)")
        d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "delete(nope)", None, dirnode.delete, u"mydata992"))

        d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "set_uri(nope)", None, dirnode.set_uri, u"hopeless", self.uri, self.uri))

        d1.addCallback(lambda res: self.shouldFail2(NoSuchChildError, "get(missing)", "missing", dirnode.get, u"missing"))

        personal = self._personal_node
        d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "mv from readonly", None, dirnode.move_child_to, u"mydata992", personal, u"nope"))

        d1.addCallback(self.log, "doing move_child_to(ro)2")
        d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "mv to readonly", None, personal.move_child_to, u"sekrit data", dirnode, u"nope"))

        d1.addCallback(self.log, "finished with _got_s2ro")
        return d1
    d.addCallback(_got_s2ro)
    def _got_home(dummy):
        home = self._private_node
        personal = self._personal_node
        d1 = defer.succeed(None)
        # move the file around and back, then verify manifest/stats
        d1.addCallback(self.log, "mv 'P/personal/sekrit data' to P/sekrit")
        d1.addCallback(lambda res:
                       personal.move_child_to(u"sekrit data", home, u"sekrit"))

        d1.addCallback(self.log, "mv P/sekrit 'P/sekrit data'")
        d1.addCallback(lambda res:
                       home.move_child_to(u"sekrit", home, u"sekrit data"))

        d1.addCallback(self.log, "mv 'P/sekret data' P/personal/")
        d1.addCallback(lambda res:
                       home.move_child_to(u"sekrit data", personal))

        d1.addCallback(lambda res: home.build_manifest().when_done())
        d1.addCallback(self.log, "manifest")
        # five items:
        # P/
        # P/personal/
        # P/personal/sekrit data
        # P/s2-rw  (same as P/s2-ro)
        # P/s2-rw/mydata992 (same as P/s2-rw/mydata992)
        d1.addCallback(lambda res:
                       self.failUnlessEqual(len(res["manifest"]), 5))
        d1.addCallback(lambda res: home.start_deep_stats().when_done())
        def _check_stats(stats):
            expected = {"count-immutable-files": 1,
                        "count-mutable-files": 0,
                        "count-literal-files": 1,
                        "count-files": 2,
                        "count-directories": 3,
                        "size-immutable-files": 112,
                        "size-literal-files": 23,
                        #"size-directories": 616, # varies
                        #"largest-directory": 616,
                        "largest-directory-children": 3,
                        "largest-immutable-file": 112,
                        }
            for k, v in expected.iteritems():
                self.failUnlessEqual(stats[k], v,
                                     "stats[%s] was %s, not %s" %
                                     (k, stats[k], v))
            # directory sizes vary with encoding details, so only bound them
            self.failUnless(stats["size-directories"] > 1300,
                            stats["size-directories"])
            self.failUnless(stats["largest-directory"] > 800,
                            stats["largest-directory"])
            self.failUnlessEqual(stats["size-files-histogram"],
                                 [(11, 31, 1), (101, 316, 1)])
        d1.addCallback(_check_stats)
        return d1
    d.addCallback(_got_home)
    return d
2007-06-28 22:05:33 +00:00
def shouldFail(self, res, expected_failure, which, substring=None):
    """Callback-style failure check for a Deferred result.

    If *res* is a Failure wrapping *expected_failure*, consume it
    (optionally requiring *substring* in its string form); any other
    result means the expected exception never happened, so fail the test.
    """
    if not isinstance(res, Failure):
        self.fail("%s was supposed to raise %s, not get '%s'" %
                  (which, expected_failure, res))
        return
    res.trap(expected_failure)
    if substring:
        message = str(res)
        self.failUnless(substring in message,
                        "substring '%s' not in '%s'"
                        % (substring, message))
2007-12-03 21:52:42 +00:00
def shouldFail2(self, expected_failure, which, substring, callable, *args, **kwargs):
    """Invoke callable(*args, **kwargs) and require it to fail.

    Returns a Deferred that succeeds when the call fails with
    *expected_failure* (and, if given, *substring* appears in the failure
    text), and fails the test otherwise.
    """
    assert substring is None or isinstance(substring, str)
    d = defer.maybeDeferred(callable, *args, **kwargs)
    def done(res):
        if not isinstance(res, Failure):
            self.fail("%s was supposed to raise %s, not get '%s'" %
                      (which, expected_failure, res))
            return
        res.trap(expected_failure)
        if substring:
            text = str(res)
            self.failUnless(substring in text,
                            "substring '%s' not in '%s'"
                            % (substring, text))
    d.addBoth(done)
    return d
2007-08-16 23:49:40 +00:00
def PUT(self, urlpath, data):
    """HTTP PUT *data* to *urlpath* on the main node's web port; returns a Deferred."""
    return do_http("put", self.webish_url + urlpath, data=data)
2007-08-16 23:49:40 +00:00
2017-02-21 22:31:37 +00:00
def GET(self, urlpath):
    """HTTP GET *urlpath* from the main node's web port; returns a Deferred."""
    return do_http("get", self.webish_url + urlpath)
2007-08-16 23:49:40 +00:00
2017-02-21 22:31:37 +00:00
def POST(self, urlpath, use_helper=False, **fields):
    """HTTP POST *fields* to *urlpath* as a multipart/form-data body.

    Each keyword in *fields* becomes one form field; a (filename, value)
    tuple produces a file-upload part. A leading "_charset" field declares
    UTF-8. With no fields at all, an empty body and no content-type header
    are sent. Delegates the actual request to POST2.
    """
    sepbase = "boogabooga"  # fixed multipart boundary; test data never contains it
    sep = "--" + sepbase
    form = []
    form.append(sep)
    form.append('Content-Disposition: form-data; name="_charset"')
    form.append('')
    form.append('UTF-8')
    form.append(sep)
    for name, value in fields.iteritems():
        if isinstance(value, tuple):
            # (filename, value) pair: emit a file-upload part
            filename, value = value
            form.append('Content-Disposition: form-data; name="%s"; '
                        'filename="%s"' % (name, filename.encode("utf-8")))
        else:
            form.append('Content-Disposition: form-data; name="%s"' % name)
        form.append('')
        form.append(str(value))
        form.append(sep)
    # the final boundary line needs a trailing "--" terminator
    form[-1] += "--"
    body = ""
    headers = {}
    if fields:
        body = "\r\n".join(form) + "\r\n"
        headers["content-type"] = "multipart/form-data; boundary=%s" % sepbase
    return self.POST2(urlpath, body, headers, use_helper)
2009-11-18 07:09:00 +00:00
2017-02-21 22:31:37 +00:00
def POST2(self, urlpath, body="", headers=None, use_helper=False):
    """HTTP POST *body* to *urlpath* with the given *headers*.

    urlpath: path appended to the node's web URL.
    headers: optional dict of HTTP headers; a fresh dict is created when
        omitted (a literal {} default would be one shared mutable object
        across all calls, and it is handed to do_http which may mutate it).
    use_helper: when True, target the helper node's web port instead of
        the main node's.
    Returns a Deferred from do_http.
    """
    if headers is None:
        headers = {}
    if use_helper:
        url = self.helper_webish_url + urlpath
    else:
        url = self.webish_url + urlpath
    return do_http("post", url, data=body, headers=headers)
2008-02-15 11:02:50 +00:00
2006-12-07 19:48:06 +00:00
def _test_web ( self , res ) :
2007-12-20 00:54:40 +00:00
public = " uri/ " + self . _root_directory_uri
2017-02-21 22:31:37 +00:00
d = self . GET ( " " )
2006-12-07 19:48:06 +00:00
def _got_welcome ( page ) :
2013-03-15 02:28:38 +00:00
html = page . replace ( ' \n ' , ' ' )
2013-04-14 21:32:13 +00:00
connected_re = r ' Connected to <span> %d </span> \ s*of <span> %d </span> known storage servers ' % ( self . numclients , self . numclients )
2013-03-15 02:28:38 +00:00
self . failUnless ( re . search ( connected_re , html ) ,
" I didn ' t see the right ' %s ' message in: \n %s " % ( connected_re , page ) )
2013-04-14 21:32:13 +00:00
# nodeids/tubids don't have any regexp-special characters
nodeid_re = r ' <th>Node ID:</th> \ s*<td title= " TubID: %s " > %s </td> ' % (
self . clients [ 0 ] . get_long_tubid ( ) , self . clients [ 0 ] . get_long_nodeid ( ) )
2013-03-15 02:28:38 +00:00
self . failUnless ( re . search ( nodeid_re , html ) ,
" I didn ' t see the right ' %s ' message in: \n %s " % ( nodeid_re , page ) )
2008-04-14 23:28:11 +00:00
self . failUnless ( " Helper: 0 active uploads " in page )
2006-12-07 19:48:06 +00:00
d . addCallback ( _got_welcome )
2007-08-16 23:49:40 +00:00
d . addCallback ( self . log , " done with _got_welcome " )
2008-02-15 11:02:50 +00:00
# get the welcome page from the node that uses the helper too
2017-02-21 22:31:37 +00:00
d . addCallback ( lambda res : do_http ( " get " , self . helper_webish_url ) )
2008-02-15 11:02:50 +00:00
def _got_welcome_helper ( page ) :
2013-03-15 02:28:38 +00:00
html = page . replace ( ' \n ' , ' ' )
2016-01-31 16:16:53 +00:00
self . failUnless ( re . search ( ' <img (src= " img/connected-yes.png " |alt= " Connected " ) {2} /> ' , html ) , page )
2013-03-15 02:28:38 +00:00
self . failUnlessIn ( " Not running helper " , page )
2008-02-15 11:02:50 +00:00
d . addCallback ( _got_welcome_helper )
2017-02-21 22:31:37 +00:00
d . addCallback ( lambda res : self . GET ( public ) )
d . addCallback ( lambda res : self . GET ( public + " /subdir1 " ) )
2006-12-07 19:48:06 +00:00
def _got_subdir1 ( page ) :
# there ought to be an href for our file
2011-06-11 16:37:41 +00:00
self . failUnlessIn ( ' <td align= " right " > %d </td> ' % len ( self . data ) , page )
2006-12-07 21:48:37 +00:00
self . failUnless ( " >mydata567</a> " in page )
2006-12-07 19:48:06 +00:00
d . addCallback ( _got_subdir1 )
2007-08-16 23:49:40 +00:00
d . addCallback ( self . log , " done with _got_subdir1 " )
2017-02-21 22:31:37 +00:00
d . addCallback ( lambda res : self . GET ( public + " /subdir1/mydata567 " ) )
2006-12-07 21:48:37 +00:00
def _got_data ( page ) :
self . failUnlessEqual ( page , self . data )
d . addCallback ( _got_data )
2007-04-24 08:41:54 +00:00
# download from a URI embedded in a URL
2007-08-16 23:49:40 +00:00
d . addCallback ( self . log , " _get_from_uri " )
2007-04-24 08:41:54 +00:00
def _get_from_uri ( res ) :
2017-02-21 22:31:37 +00:00
return self . GET ( " uri/ %s ?filename= %s " % ( self . uri , " mydata567 " ) )
2007-04-24 08:41:54 +00:00
d . addCallback ( _get_from_uri )
def _got_from_uri ( page ) :
self . failUnlessEqual ( page , self . data )
d . addCallback ( _got_from_uri )
# download from a URI embedded in a URL, second form
2007-08-16 23:49:40 +00:00
d . addCallback ( self . log , " _get_from_uri2 " )
2007-04-24 08:41:54 +00:00
def _get_from_uri2 ( res ) :
2017-02-21 22:31:37 +00:00
return self . GET ( " uri?uri= %s " % ( self . uri , ) )
2007-04-24 08:41:54 +00:00
d . addCallback ( _get_from_uri2 )
2007-12-03 21:52:42 +00:00
d . addCallback ( _got_from_uri )
# download from a bogus URI, make sure we get a reasonable error
d.addCallback(self.log, "_get_from_bogus_uri", level=log.UNUSUAL)
@defer.inlineCallbacks
def _get_from_bogus_uri(res):
    # a mangled URI should produce an HTTP 410 (Gone) error
    d1 = self.GET("uri/%s?filename=%s"
                  % (self.mangle_uri(self.uri), "mydata567"))
    e = yield self.assertFailure(d1, Error)
    self.assertEquals(e.status, "410")
d.addCallback(_get_from_bogus_uri)
d.addCallback(self.log, "_got_from_bogus_uri", level=log.UNUSUAL)

# upload a file with PUT
d.addCallback(self.log, "about to try PUT")
d.addCallback(lambda res: self.PUT(public + "/subdir3/new.txt",
                                   "new.txt contents"))
d.addCallback(lambda res: self.GET(public + "/subdir3/new.txt"))
d.addCallback(self.failUnlessEqual, "new.txt contents")

# and again with something large enough to use multiple segments,
# and hopefully trigger pauseProducing too
def _new_happy_semantics(ign):
    # lower servers-of-happiness so the big upload succeeds on this
    # small test grid (these get reset somewhere? Whatever.)
    for c in self.clients:
        c.encoding_params['happy'] = 1
d.addCallback(_new_happy_semantics)
d.addCallback(lambda res: self.PUT(public + "/subdir3/big.txt",
                                   "big" * 500000)) # 1.5MB
d.addCallback(lambda res: self.GET(public + "/subdir3/big.txt"))
d.addCallback(lambda res: self.failUnlessEqual(len(res), 1500000))
# can we replace files in place?
d.addCallback(lambda res: self.PUT(public + "/subdir3/new.txt",
                                   "NEWER contents"))
d.addCallback(lambda res: self.GET(public + "/subdir3/new.txt"))
d.addCallback(self.failUnlessEqual, "NEWER contents")

# test unlinked POST
d.addCallback(lambda res: self.POST("uri", t="upload",
                                    file=("new.txt", "data" * 10000)))
# and again using the helper, which exercises different upload-status
# display code
d.addCallback(lambda res: self.POST("uri", use_helper=True, t="upload",
                                    file=("foo.txt", "data2" * 10000)))

# check that the status page exists
d.addCallback(lambda res: self.GET("status"))
2008-03-04 04:37:17 +00:00
def _got_status ( res ) :
# find an interesting upload and download to look at. LIT files
# are not interesting.
Overhaul IFilesystemNode handling, to simplify tests and use POLA internally.
* stop using IURI as an adapter
* pass cap strings around instead of URI instances
* move filenode/dirnode creation duties from Client to new NodeMaker class
* move other Client duties to KeyGenerator, SecretHolder, History classes
* stop passing Client reference to dirnode/filenode constructors
- pass less-powerful references instead, like StorageBroker or Uploader
* always create DirectoryNodes by wrapping a filenode (mutable for now)
* remove some specialized mock classes from unit tests
Detailed list of changes (done one at a time, then merged together)
always pass a string to create_node_from_uri(), not an IURI instance
always pass a string to IFilesystemNode constructors, not an IURI instance
stop using IURI() as an adapter, switch on cap prefix in create_node_from_uri()
client.py: move SecretHolder code out to a separate class
test_web.py: hush pyflakes
client.py: move NodeMaker functionality out into a separate object
LiteralFileNode: stop storing a Client reference
immutable Checker: remove Client reference, it only needs a SecretHolder
immutable Upload: remove Client reference, leave SecretHolder and StorageBroker
immutable Repairer: replace Client reference with StorageBroker and SecretHolder
immutable FileNode: remove Client reference
mutable.Publish: stop passing Client
mutable.ServermapUpdater: get StorageBroker in constructor, not by peeking into Client reference
MutableChecker: reference StorageBroker and History directly, not through Client
mutable.FileNode: removed unused indirection to checker classes
mutable.FileNode: remove Client reference
client.py: move RSA key generation into a separate class, so it can be passed to the nodemaker
move create_mutable_file() into NodeMaker
test_dirnode.py: stop using FakeClient mockups, use NoNetworkGrid instead. This simplifies the code, but takes longer to run (17s instead of 6s). This should come down later when other cleanups make it possible to use simpler (non-RSA) fake mutable files for dirnode tests.
test_mutable.py: clean up basedir names
client.py: move create_empty_dirnode() into NodeMaker
dirnode.py: get rid of DirectoryNode.create
remove DirectoryNode.init_from_uri, refactor NodeMaker for customization, simplify test_web's mock Client to match
stop passing Client to DirectoryNode, make DirectoryNode.create_with_mutablefile the normal DirectoryNode constructor, start removing client from NodeMaker
remove Client from NodeMaker
move helper status into History, pass History to web.Status instead of Client
test_mutable.py: fix minor typo
2009-08-15 11:02:56 +00:00
h = self . clients [ 0 ] . get_history ( )
for ds in h . list_all_download_statuses ( ) :
2008-04-17 20:02:22 +00:00
if ds . get_size ( ) > 200 :
self . _down_status = ds . get_counter ( )
Overhaul IFilesystemNode handling, to simplify tests and use POLA internally.
* stop using IURI as an adapter
* pass cap strings around instead of URI instances
* move filenode/dirnode creation duties from Client to new NodeMaker class
* move other Client duties to KeyGenerator, SecretHolder, History classes
* stop passing Client reference to dirnode/filenode constructors
- pass less-powerful references instead, like StorageBroker or Uploader
* always create DirectoryNodes by wrapping a filenode (mutable for now)
* remove some specialized mock classes from unit tests
Detailed list of changes (done one at a time, then merged together)
always pass a string to create_node_from_uri(), not an IURI instance
always pass a string to IFilesystemNode constructors, not an IURI instance
stop using IURI() as an adapter, switch on cap prefix in create_node_from_uri()
client.py: move SecretHolder code out to a separate class
test_web.py: hush pyflakes
client.py: move NodeMaker functionality out into a separate object
LiteralFileNode: stop storing a Client reference
immutable Checker: remove Client reference, it only needs a SecretHolder
immutable Upload: remove Client reference, leave SecretHolder and StorageBroker
immutable Repairer: replace Client reference with StorageBroker and SecretHolder
immutable FileNode: remove Client reference
mutable.Publish: stop passing Client
mutable.ServermapUpdater: get StorageBroker in constructor, not by peeking into Client reference
MutableChecker: reference StorageBroker and History directly, not through Client
mutable.FileNode: removed unused indirection to checker classes
mutable.FileNode: remove Client reference
client.py: move RSA key generation into a separate class, so it can be passed to the nodemaker
move create_mutable_file() into NodeMaker
test_dirnode.py: stop using FakeClient mockups, use NoNetworkGrid instead. This simplifies the code, but takes longer to run (17s instead of 6s). This should come down later when other cleanups make it possible to use simpler (non-RSA) fake mutable files for dirnode tests.
test_mutable.py: clean up basedir names
client.py: move create_empty_dirnode() into NodeMaker
dirnode.py: get rid of DirectoryNode.create
remove DirectoryNode.init_from_uri, refactor NodeMaker for customization, simplify test_web's mock Client to match
stop passing Client to DirectoryNode, make DirectoryNode.create_with_mutablefile the normal DirectoryNode constructor, start removing client from NodeMaker
remove Client from NodeMaker
move helper status into History, pass History to web.Status instead of Client
test_mutable.py: fix minor typo
2009-08-15 11:02:56 +00:00
for us in h . list_all_upload_statuses ( ) :
2008-04-17 20:02:22 +00:00
if us . get_size ( ) > 200 :
self . _up_status = us . get_counter ( )
Overhaul IFilesystemNode handling, to simplify tests and use POLA internally.
* stop using IURI as an adapter
* pass cap strings around instead of URI instances
* move filenode/dirnode creation duties from Client to new NodeMaker class
* move other Client duties to KeyGenerator, SecretHolder, History classes
* stop passing Client reference to dirnode/filenode constructors
- pass less-powerful references instead, like StorageBroker or Uploader
* always create DirectoryNodes by wrapping a filenode (mutable for now)
* remove some specialized mock classes from unit tests
Detailed list of changes (done one at a time, then merged together)
always pass a string to create_node_from_uri(), not an IURI instance
always pass a string to IFilesystemNode constructors, not an IURI instance
stop using IURI() as an adapter, switch on cap prefix in create_node_from_uri()
client.py: move SecretHolder code out to a separate class
test_web.py: hush pyflakes
client.py: move NodeMaker functionality out into a separate object
LiteralFileNode: stop storing a Client reference
immutable Checker: remove Client reference, it only needs a SecretHolder
immutable Upload: remove Client reference, leave SecretHolder and StorageBroker
immutable Repairer: replace Client reference with StorageBroker and SecretHolder
immutable FileNode: remove Client reference
mutable.Publish: stop passing Client
mutable.ServermapUpdater: get StorageBroker in constructor, not by peeking into Client reference
MutableChecker: reference StorageBroker and History directly, not through Client
mutable.FileNode: removed unused indirection to checker classes
mutable.FileNode: remove Client reference
client.py: move RSA key generation into a separate class, so it can be passed to the nodemaker
move create_mutable_file() into NodeMaker
test_dirnode.py: stop using FakeClient mockups, use NoNetworkGrid instead. This simplifies the code, but takes longer to run (17s instead of 6s). This should come down later when other cleanups make it possible to use simpler (non-RSA) fake mutable files for dirnode tests.
test_mutable.py: clean up basedir names
client.py: move create_empty_dirnode() into NodeMaker
dirnode.py: get rid of DirectoryNode.create
remove DirectoryNode.init_from_uri, refactor NodeMaker for customization, simplify test_web's mock Client to match
stop passing Client to DirectoryNode, make DirectoryNode.create_with_mutablefile the normal DirectoryNode constructor, start removing client from NodeMaker
remove Client from NodeMaker
move helper status into History, pass History to web.Status instead of Client
test_mutable.py: fix minor typo
2009-08-15 11:02:56 +00:00
rs = list ( h . list_all_retrieve_statuses ( ) ) [ 0 ]
2008-04-17 02:05:41 +00:00
self . _retrieve_status = rs . get_counter ( )
Overhaul IFilesystemNode handling, to simplify tests and use POLA internally.
* stop using IURI as an adapter
* pass cap strings around instead of URI instances
* move filenode/dirnode creation duties from Client to new NodeMaker class
* move other Client duties to KeyGenerator, SecretHolder, History classes
* stop passing Client reference to dirnode/filenode constructors
- pass less-powerful references instead, like StorageBroker or Uploader
* always create DirectoryNodes by wrapping a filenode (mutable for now)
* remove some specialized mock classes from unit tests
Detailed list of changes (done one at a time, then merged together)
always pass a string to create_node_from_uri(), not an IURI instance
always pass a string to IFilesystemNode constructors, not an IURI instance
stop using IURI() as an adapter, switch on cap prefix in create_node_from_uri()
client.py: move SecretHolder code out to a separate class
test_web.py: hush pyflakes
client.py: move NodeMaker functionality out into a separate object
LiteralFileNode: stop storing a Client reference
immutable Checker: remove Client reference, it only needs a SecretHolder
immutable Upload: remove Client reference, leave SecretHolder and StorageBroker
immutable Repairer: replace Client reference with StorageBroker and SecretHolder
immutable FileNode: remove Client reference
mutable.Publish: stop passing Client
mutable.ServermapUpdater: get StorageBroker in constructor, not by peeking into Client reference
MutableChecker: reference StorageBroker and History directly, not through Client
mutable.FileNode: removed unused indirection to checker classes
mutable.FileNode: remove Client reference
client.py: move RSA key generation into a separate class, so it can be passed to the nodemaker
move create_mutable_file() into NodeMaker
test_dirnode.py: stop using FakeClient mockups, use NoNetworkGrid instead. This simplifies the code, but takes longer to run (17s instead of 6s). This should come down later when other cleanups make it possible to use simpler (non-RSA) fake mutable files for dirnode tests.
test_mutable.py: clean up basedir names
client.py: move create_empty_dirnode() into NodeMaker
dirnode.py: get rid of DirectoryNode.create
remove DirectoryNode.init_from_uri, refactor NodeMaker for customization, simplify test_web's mock Client to match
stop passing Client to DirectoryNode, make DirectoryNode.create_with_mutablefile the normal DirectoryNode constructor, start removing client from NodeMaker
remove Client from NodeMaker
move helper status into History, pass History to web.Status instead of Client
test_mutable.py: fix minor typo
2009-08-15 11:02:56 +00:00
ps = list ( h . list_all_publish_statuses ( ) ) [ 0 ]
2008-04-17 02:05:41 +00:00
self . _publish_status = ps . get_counter ( )
Overhaul IFilesystemNode handling, to simplify tests and use POLA internally.
* stop using IURI as an adapter
* pass cap strings around instead of URI instances
* move filenode/dirnode creation duties from Client to new NodeMaker class
* move other Client duties to KeyGenerator, SecretHolder, History classes
* stop passing Client reference to dirnode/filenode constructors
- pass less-powerful references instead, like StorageBroker or Uploader
* always create DirectoryNodes by wrapping a filenode (mutable for now)
* remove some specialized mock classes from unit tests
Detailed list of changes (done one at a time, then merged together)
always pass a string to create_node_from_uri(), not an IURI instance
always pass a string to IFilesystemNode constructors, not an IURI instance
stop using IURI() as an adapter, switch on cap prefix in create_node_from_uri()
client.py: move SecretHolder code out to a separate class
test_web.py: hush pyflakes
client.py: move NodeMaker functionality out into a separate object
LiteralFileNode: stop storing a Client reference
immutable Checker: remove Client reference, it only needs a SecretHolder
immutable Upload: remove Client reference, leave SecretHolder and StorageBroker
immutable Repairer: replace Client reference with StorageBroker and SecretHolder
immutable FileNode: remove Client reference
mutable.Publish: stop passing Client
mutable.ServermapUpdater: get StorageBroker in constructor, not by peeking into Client reference
MutableChecker: reference StorageBroker and History directly, not through Client
mutable.FileNode: removed unused indirection to checker classes
mutable.FileNode: remove Client reference
client.py: move RSA key generation into a separate class, so it can be passed to the nodemaker
move create_mutable_file() into NodeMaker
test_dirnode.py: stop using FakeClient mockups, use NoNetworkGrid instead. This simplifies the code, but takes longer to run (17s instead of 6s). This should come down later when other cleanups make it possible to use simpler (non-RSA) fake mutable files for dirnode tests.
test_mutable.py: clean up basedir names
client.py: move create_empty_dirnode() into NodeMaker
dirnode.py: get rid of DirectoryNode.create
remove DirectoryNode.init_from_uri, refactor NodeMaker for customization, simplify test_web's mock Client to match
stop passing Client to DirectoryNode, make DirectoryNode.create_with_mutablefile the normal DirectoryNode constructor, start removing client from NodeMaker
remove Client from NodeMaker
move helper status into History, pass History to web.Status instead of Client
test_mutable.py: fix minor typo
2009-08-15 11:02:56 +00:00
us = list ( h . list_all_mapupdate_statuses ( ) ) [ 0 ]
2008-04-17 20:02:22 +00:00
self . _update_status = us . get_counter ( )
2008-03-04 04:37:17 +00:00
# and that there are some upload- and download- status pages
return self . GET ( " status/up- %d " % self . _up_status )
d . addCallback ( _got_status )
# walk through the per-operation status pages recorded by _got_status,
# checking that each one renders
def _got_up(res):
    return self.GET("status/down-%d" % self._down_status)
d.addCallback(_got_up)
def _got_down(res):
    return self.GET("status/mapupdate-%d" % self._update_status)
d.addCallback(_got_down)
def _got_update(res):
    return self.GET("status/publish-%d" % self._publish_status)
d.addCallback(_got_update)
def _got_publish(res):
    self.failUnlessIn("Publish Results", res)
    return self.GET("status/retrieve-%d" % self._retrieve_status)
d.addCallback(_got_publish)
def _got_retrieve(res):
    self.failUnlessIn("Retrieve Results", res)
d.addCallback(_got_retrieve)
2008-04-15 22:57:28 +00:00
# check that the helper status page exists
2017-02-21 22:31:37 +00:00
d . addCallback ( lambda res : self . GET ( " helper_status " ) )
2008-04-15 22:57:28 +00:00
def _got_helper_status ( res ) :
self . failUnless ( " Bytes Fetched: " in res )
# touch a couple of files in the helper's working directory to
# exercise more code paths
workdir = os . path . join ( self . getdir ( " client0 " ) , " helper " )
incfile = os . path . join ( workdir , " CHK_incoming " , " spurious " )
f = open ( incfile , " wb " )
f . write ( " small file " )
f . close ( )
then = time . time ( ) - 86400 * 3
now = time . time ( )
os . utime ( incfile , ( now , then ) )
encfile = os . path . join ( workdir , " CHK_encoding " , " spurious " )
f = open ( encfile , " wb " )
f . write ( " less small file " )
f . close ( )
os . utime ( encfile , ( now , then ) )
d . addCallback ( _got_helper_status )
# and that the json form exists
d.addCallback(lambda res: self.GET("helper_status?t=json"))
def _got_helper_status_json(res):
    # the sizes (10 and 15) match the spurious files written by
    # _got_helper_status above
    data = json.loads(res)
    self.failUnlessEqual(data["chk_upload_helper.upload_need_upload"],
                         1)
    self.failUnlessEqual(data["chk_upload_helper.incoming_count"], 1)
    self.failUnlessEqual(data["chk_upload_helper.incoming_size"], 10)
    self.failUnlessEqual(data["chk_upload_helper.incoming_size_old"],
                         10)
    self.failUnlessEqual(data["chk_upload_helper.encoding_count"], 1)
    self.failUnlessEqual(data["chk_upload_helper.encoding_size"], 15)
    self.failUnlessEqual(data["chk_upload_helper.encoding_size_old"],
                         15)
d.addCallback(_got_helper_status_json)
# and check that client[3] (which uses a helper but does not run one
# itself) doesn't explode when you ask for its status
d.addCallback(lambda res: do_http("get",
                                  self.helper_webish_url + "status/"))
def _got_non_helper_status(res):
    self.failUnlessIn("Recent and Active Operations", res)
d.addCallback(_got_non_helper_status)

# or for helper status with t=json
d.addCallback(lambda res:
              do_http("get",
                      self.helper_webish_url + "helper_status?t=json"))
def _got_non_helper_status_json(res):
    # a client that runs no helper reports an empty helper-status dict
    data = json.loads(res)
    self.failUnlessEqual(data, {})
d.addCallback(_got_non_helper_status_json)
# see if the statistics page exists
d . addCallback ( lambda res : self . GET ( " statistics " ) )
def _got_stats ( res ) :
2012-12-29 23:54:54 +00:00
self . failUnlessIn ( " Operational Statistics " , res )
self . failUnlessIn ( " ' downloader.files_downloaded ' : 5, " , res )
2008-04-15 22:57:28 +00:00
d . addCallback ( _got_stats )
d . addCallback ( lambda res : self . GET ( " statistics?t=json " ) )
def _got_stats_json ( res ) :
2017-01-19 22:39:53 +00:00
data = json . loads ( res )
2008-04-15 22:57:28 +00:00
self . failUnlessEqual ( data [ " counters " ] [ " uploader.files_uploaded " ] , 5 )
self . failUnlessEqual ( data [ " stats " ] [ " chk_upload_helper.upload_need_upload " ] , 1 )
d . addCallback ( _got_stats_json )
2007-08-22 21:54:34 +00:00
2007-07-03 20:47:37 +00:00
# TODO: mangle the second segment of a file, to test errors that
# occur after we've already sent some good data, which uses a
# different error path.
2007-07-03 20:18:14 +00:00
2007-07-08 03:06:44 +00:00
# TODO: download a URI with a form
2007-04-24 08:41:54 +00:00
# TODO: create a directory by using a form
# TODO: upload by using a form on the directory page
2007-12-03 21:52:42 +00:00
# url = base + "somedir/subdir1/freeform_post!!upload"
2007-04-24 08:41:54 +00:00
# TODO: delete a file by using a button on the directory page
2006-12-07 19:48:06 +00:00
return d
2016-09-09 17:41:20 +00:00
@defer.inlineCallbacks
2007-06-26 22:55:00 +00:00
def _test_runner ( self , res ) :
# exercise some of the diagnostic tools in runner.py
2007-07-13 23:58:08 +00:00
# find a share
2010-07-24 03:21:23 +00:00
for ( dirpath , dirnames , filenames ) in os . walk ( unicode ( self . basedir ) ) :
2007-07-13 23:58:08 +00:00
if " storage " not in dirpath :
continue
if not filenames :
continue
pieces = dirpath . split ( os . sep )
2009-02-21 07:17:10 +00:00
if ( len ( pieces ) > = 4
and pieces [ - 4 ] == " storage "
and pieces [ - 3 ] == " shares " ) :
2008-01-31 23:26:28 +00:00
# we're sitting in .../storage/shares/$START/$SINDEX , and there
# are sharefiles here
2007-07-13 23:58:08 +00:00
filename = os . path . join ( dirpath , filenames [ 0 ] )
2007-12-03 21:52:42 +00:00
# peek at the magic to see if it is a chk share
magic = open ( filename , " rb " ) . read ( 4 )
if magic == ' \x00 \x00 \x00 \x01 ' :
break
2007-06-26 22:55:00 +00:00
else :
2010-07-22 00:14:18 +00:00
self . fail ( " unable to find any uri_extension files in %r "
2007-06-26 22:55:00 +00:00
% self . basedir )
2010-07-22 00:14:18 +00:00
log . msg ( " test_system.SystemTest._test_runner using %r " % filename )
2007-06-26 23:51:38 +00:00
2016-09-09 17:41:20 +00:00
rc , output , err = yield run_cli ( " debug " , " dump-share " , " --offsets " ,
unicode_to_argv ( filename ) )
2007-06-26 22:55:00 +00:00
self . failUnlessEqual ( rc , 0 )
2007-09-25 01:12:37 +00:00
# we only upload a single file, so we can assert some things about
# its size and shares.
2010-07-22 00:14:18 +00:00
self . failUnlessIn ( " share filename: %s " % quote_output ( abspath_expanduser_unicode ( filename ) ) , output )
self . failUnlessIn ( " size: %d \n " % len ( self . data ) , output )
self . failUnlessIn ( " num_segments: 1 \n " , output )
2007-09-25 01:12:37 +00:00
# segment_size is always a multiple of needed_shares
2010-07-22 00:14:18 +00:00
self . failUnlessIn ( " segment_size: %d \n " % mathutil . next_multiple ( len ( self . data ) , 3 ) , output )
self . failUnlessIn ( " total_shares: 10 \n " , output )
2007-06-26 22:55:00 +00:00
# keys which are supposed to be present
for key in ( " size " , " num_segments " , " segment_size " ,
" needed_shares " , " total_shares " ,
" codec_name " , " codec_params " , " tail_codec_params " ,
2008-03-24 20:39:51 +00:00
#"plaintext_hash", "plaintext_root_hash",
2008-03-23 22:35:54 +00:00
" crypttext_hash " , " crypttext_root_hash " ,
2008-02-06 19:48:19 +00:00
" share_root_hash " , " UEB_hash " ) :
2010-07-22 00:14:18 +00:00
self . failUnlessIn ( " %s : " % key , output )
self . failUnlessIn ( " verify-cap: URI:CHK-Verifier: " , output )
2007-06-26 22:55:00 +00:00
2008-02-06 20:19:51 +00:00
# now use its storage index to find the other shares using the
# 'find-shares' tool
sharedir , shnum = os . path . split ( filename )
storagedir , storage_index_s = os . path . split ( sharedir )
2010-07-25 01:03:18 +00:00
storage_index_s = str ( storage_index_s )
2008-02-06 20:19:51 +00:00
nodedirs = [ self . getdir ( " client %d " % i ) for i in range ( self . numclients ) ]
2016-09-09 17:41:20 +00:00
rc , out , err = yield run_cli ( " debug " , " find-shares " , storage_index_s ,
* nodedirs )
2008-02-06 20:19:51 +00:00
self . failUnlessEqual ( rc , 0 )
2016-09-09 17:41:20 +00:00
sharefiles = [ sfn . strip ( ) for sfn in out . splitlines ( ) ]
2008-02-06 20:19:51 +00:00
self . failUnlessEqual ( len ( sharefiles ) , 10 )
2008-02-12 01:17:01 +00:00
# also exercise the 'catalog-shares' tool
nodedirs = [ self . getdir ( " client %d " % i ) for i in range ( self . numclients ) ]
2016-09-09 17:41:20 +00:00
rc , out , err = yield run_cli ( " debug " , " catalog-shares " , * nodedirs )
2008-02-12 01:17:01 +00:00
self . failUnlessEqual ( rc , 0 )
2016-09-09 17:41:20 +00:00
descriptions = [ sfn . strip ( ) for sfn in out . splitlines ( ) ]
2008-02-12 01:17:01 +00:00
self . failUnlessEqual ( len ( descriptions ) , 30 )
matching = [ line
for line in descriptions
if line . startswith ( " CHK %s " % storage_index_s ) ]
self . failUnlessEqual ( len ( matching ) , 10 )
2007-09-26 19:06:55 +00:00
def _test_control(self, res):
    # exercise the remote-control-the-client foolscap interfaces in
    # allmydata.control (mostly used for performance tests)
    c0 = self.clients[0]
    control_furl_file = os.path.join(c0.basedir, "private", "control.furl")
    # use a context manager so the file handle is closed promptly
    # instead of leaking until GC
    with open(control_furl_file, "r") as f:
        control_furl = f.read().strip()
    # it doesn't really matter which Tub we use to connect to the client,
    # so let's just use our IntroducerNode's
    d = self.introducer.tub.getReference(control_furl)
    d.addCallback(self._test_control2, control_furl_file)
    return d
def _test_control2(self, rref, filename):
    # drive the control port: run a small speed test and poke the
    # monitoring RPCs
    d = defer.succeed(None)
    d.addCallback(lambda res: rref.callRemote("speed_test", 1, 200, False))
    # get_memory_usage reads /proc, which only exists on Linux. Use
    # startswith() because sys.platform is "linux2"/"linux3" on
    # Python 2 but plain "linux" on Python 3.3+.
    if sys.platform.startswith("linux"):
        d.addCallback(lambda res: rref.callRemote("get_memory_usage"))
    d.addCallback(lambda res: rref.callRemote("measure_peer_response_time"))
    return d
2007-10-12 02:20:41 +00:00
def _test_cli ( self , res ) :
# run various CLI commands (in a thread, since they use blocking
# network calls)
2007-12-03 21:52:42 +00:00
private_uri = self . _private_node . get_uri ( )
2008-01-08 00:46:22 +00:00
client0_basedir = self . getdir ( " client0 " )
2007-12-03 21:52:42 +00:00
2007-10-12 02:20:41 +00:00
nodeargs = [
2008-01-08 00:46:22 +00:00
" --node-directory " , client0_basedir ,
2007-10-21 19:33:17 +00:00
]
d = defer . succeed ( None )
2007-10-12 02:20:41 +00:00
2008-05-20 02:28:50 +00:00
# for compatibility with earlier versions, private/root_dir.cap is
# supposed to be treated as an alias named "tahoe:". Start by making
# sure that works, before we add other aliases.
root_file = os . path . join ( client0_basedir , " private " , " root_dir.cap " )
f = open ( root_file , " w " )
f . write ( private_uri )
f . close ( )
2016-09-09 17:41:20 +00:00
@defer.inlineCallbacks
2008-08-01 22:05:14 +00:00
def run ( ignored , verb , * args , * * kwargs ) :
2016-09-09 17:41:20 +00:00
rc , out , err = yield run_cli ( verb , * args , nodeargs = nodeargs , * * kwargs )
defer . returnValue ( ( out , err ) )
2008-05-20 02:28:50 +00:00
def _check_ls ( ( out , err ) , expected_children , unexpected_children = [ ] ) :
self . failUnlessEqual ( err , " " )
for s in expected_children :
2008-08-01 18:46:24 +00:00
self . failUnless ( s in out , ( s , out ) )
2008-05-20 02:28:50 +00:00
for s in unexpected_children :
2008-08-01 18:46:24 +00:00
self . failIf ( s in out , ( s , out ) )
2008-05-20 02:28:50 +00:00
2007-10-21 19:33:17 +00:00
def _check_ls_root ( ( out , err ) ) :
2007-10-12 02:20:41 +00:00
self . failUnless ( " personal " in out )
self . failUnless ( " s2-ro " in out )
self . failUnless ( " s2-rw " in out )
self . failUnlessEqual ( err , " " )
2008-05-20 02:28:50 +00:00
# this should reference private_uri
d . addCallback ( run , " ls " )
d . addCallback ( _check_ls , [ " personal " , " s2-ro " , " s2-rw " ] )
2008-05-20 21:36:04 +00:00
d . addCallback ( run , " list-aliases " )
def _check_aliases_1 ( ( out , err ) ) :
self . failUnlessEqual ( err , " " )
2010-06-07 01:02:15 +00:00
self . failUnlessEqual ( out . strip ( " \n " ) , " tahoe: %s " % private_uri )
2008-05-20 21:36:04 +00:00
d . addCallback ( _check_aliases_1 )
2008-05-20 02:28:50 +00:00
# now that that's out of the way, remove root_dir.cap and work with
# new files
d . addCallback ( lambda res : os . unlink ( root_file ) )
2008-05-20 21:36:04 +00:00
d . addCallback ( run , " list-aliases " )
def _check_aliases_2 ( ( out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( out , " " )
d . addCallback ( _check_aliases_2 )
2008-05-20 02:28:50 +00:00
d . addCallback ( run , " mkdir " )
def _got_dir ( ( out , err ) ) :
self . failUnless ( uri . from_string_dirnode ( out . strip ( ) ) )
return out . strip ( )
d . addCallback ( _got_dir )
d . addCallback ( lambda newcap : run ( None , " add-alias " , " tahoe " , newcap ) )
2008-05-20 21:36:04 +00:00
d . addCallback ( run , " list-aliases " )
def _check_aliases_3 ( ( out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnless ( " tahoe: " in out )
d . addCallback ( _check_aliases_3 )
2008-05-20 02:28:50 +00:00
def _check_empty_dir ( ( out , err ) ) :
self . failUnlessEqual ( out , " " )
2007-10-21 19:33:17 +00:00
self . failUnlessEqual ( err , " " )
2008-05-20 02:28:50 +00:00
d . addCallback ( run , " ls " )
d . addCallback ( _check_empty_dir )
2008-05-22 00:34:52 +00:00
def _check_missing_dir ( ( out , err ) ) :
# TODO: check that rc==2
self . failUnlessEqual ( out , " " )
self . failUnlessEqual ( err , " No such file or directory \n " )
d . addCallback ( run , " ls " , " bogus " )
d . addCallback ( _check_missing_dir )
2008-05-20 02:28:50 +00:00
files = [ ]
datas = [ ]
for i in range ( 10 ) :
fn = os . path . join ( self . basedir , " file %d " % i )
files . append ( fn )
data = " data to be uploaded: file %d \n " % i
datas . append ( data )
2008-05-20 03:24:12 +00:00
open ( fn , " wb " ) . write ( data )
2008-05-20 02:28:50 +00:00
2008-05-21 18:49:22 +00:00
def _check_stdout_against ( ( out , err ) , filenum = None , data = None ) :
2008-05-20 23:56:03 +00:00
self . failUnlessEqual ( err , " " )
2008-05-21 18:49:22 +00:00
if filenum is not None :
self . failUnlessEqual ( out , datas [ filenum ] )
if data is not None :
self . failUnlessEqual ( out , data )
2008-05-20 23:56:03 +00:00
2008-05-20 02:28:50 +00:00
# test all both forms of put: from a file, and from stdin
# tahoe put bar FOO
d . addCallback ( run , " put " , files [ 0 ] , " tahoe-file0 " )
def _put_out ( ( out , err ) ) :
self . failUnless ( " URI:LIT: " in out , out )
self . failUnless ( " 201 Created " in err , err )
uri0 = out . strip ( )
return run ( None , " get " , uri0 )
d . addCallback ( _put_out )
d . addCallback ( lambda ( out , err ) : self . failUnlessEqual ( out , datas [ 0 ] ) )
d . addCallback ( run , " put " , files [ 1 ] , " subdir/tahoe-file1 " )
# tahoe put bar tahoe:FOO
d . addCallback ( run , " put " , files [ 2 ] , " tahoe:file2 " )
2011-10-14 03:15:00 +00:00
d . addCallback ( run , " put " , " --format=SDMF " , files [ 3 ] , " tahoe:file3 " )
2008-05-20 19:49:11 +00:00
def _check_put_mutable ( ( out , err ) ) :
self . _mutable_file3_uri = out . strip ( )
d . addCallback ( _check_put_mutable )
2008-05-20 23:56:03 +00:00
d . addCallback ( run , " get " , " tahoe:file3 " )
d . addCallback ( _check_stdout_against , 3 )
2008-05-20 02:28:50 +00:00
# tahoe put FOO
STDIN_DATA = " This is the file to upload from stdin. "
2008-08-02 02:27:29 +00:00
d . addCallback ( run , " put " , " - " , " tahoe-file-stdin " , stdin = STDIN_DATA )
2008-05-20 02:28:50 +00:00
# tahoe put tahoe:FOO
2008-08-02 02:27:29 +00:00
d . addCallback ( run , " put " , " - " , " tahoe:from-stdin " ,
2008-08-01 22:05:14 +00:00
stdin = " Other file from stdin. " )
2008-05-20 02:28:50 +00:00
d . addCallback ( run , " ls " )
2008-05-20 19:36:55 +00:00
d . addCallback ( _check_ls , [ " tahoe-file0 " , " file2 " , " file3 " , " subdir " ,
2008-05-20 02:28:50 +00:00
" tahoe-file-stdin " , " from-stdin " ] )
d . addCallback ( run , " ls " , " subdir " )
d . addCallback ( _check_ls , [ " tahoe-file1 " ] )
# tahoe mkdir FOO
d . addCallback ( run , " mkdir " , " subdir2 " )
d . addCallback ( run , " ls " )
# TODO: extract the URI, set an alias with it
d . addCallback ( _check_ls , [ " subdir2 " ] )
# tahoe get: (to stdin and to a file)
d . addCallback ( run , " get " , " tahoe-file0 " )
2008-05-20 23:56:03 +00:00
d . addCallback ( _check_stdout_against , 0 )
2008-05-20 02:28:50 +00:00
d . addCallback ( run , " get " , " tahoe:subdir/tahoe-file1 " )
2008-05-20 23:56:03 +00:00
d . addCallback ( _check_stdout_against , 1 )
2008-05-20 02:28:50 +00:00
outfile0 = os . path . join ( self . basedir , " outfile0 " )
d . addCallback ( run , " get " , " file2 " , outfile0 )
def _check_outfile0 ( ( out , err ) ) :
data = open ( outfile0 , " rb " ) . read ( )
self . failUnlessEqual ( data , " data to be uploaded: file2 \n " )
d . addCallback ( _check_outfile0 )
outfile1 = os . path . join ( self . basedir , " outfile0 " )
d . addCallback ( run , " get " , " tahoe:subdir/tahoe-file1 " , outfile1 )
def _check_outfile1 ( ( out , err ) ) :
data = open ( outfile1 , " rb " ) . read ( )
self . failUnlessEqual ( data , " data to be uploaded: file1 \n " )
d . addCallback ( _check_outfile1 )
d . addCallback ( run , " rm " , " tahoe-file0 " )
d . addCallback ( run , " rm " , " tahoe:file2 " )
d . addCallback ( run , " ls " )
d . addCallback ( _check_ls , [ ] , [ " tahoe-file0 " , " file2 " ] )
d . addCallback ( run , " ls " , " -l " )
def _check_ls_l ( ( out , err ) ) :
lines = out . split ( " \n " )
for l in lines :
if " tahoe-file-stdin " in l :
2008-05-20 19:36:55 +00:00
self . failUnless ( l . startswith ( " -r-- " ) , l )
2008-05-20 02:28:50 +00:00
self . failUnless ( " %d " % len ( STDIN_DATA ) in l )
2008-05-20 19:36:55 +00:00
if " file3 " in l :
self . failUnless ( l . startswith ( " -rw- " ) , l ) # mutable
2008-05-20 02:28:50 +00:00
d . addCallback ( _check_ls_l )
2008-05-20 19:49:11 +00:00
d . addCallback ( run , " ls " , " --uri " )
def _check_ls_uri ( ( out , err ) ) :
lines = out . split ( " \n " )
for l in lines :
if " file3 " in l :
self . failUnless ( self . _mutable_file3_uri in l )
d . addCallback ( _check_ls_uri )
d . addCallback ( run , " ls " , " --readonly-uri " )
def _check_ls_rouri ( ( out , err ) ) :
lines = out . split ( " \n " )
for l in lines :
if " file3 " in l :
rw_uri = self . _mutable_file3_uri
u = uri . from_string_mutable_filenode ( rw_uri )
ro_uri = u . get_readonly ( ) . to_string ( )
self . failUnless ( ro_uri in l )
d . addCallback ( _check_ls_rouri )
2011-01-20 04:32:38 +00:00
d . addCallback ( run , " mv " , " tahoe-file-stdin " , " tahoe-moved " )
2008-05-20 02:28:50 +00:00
d . addCallback ( run , " ls " )
2011-01-20 04:32:38 +00:00
d . addCallback ( _check_ls , [ " tahoe-moved " ] , [ " tahoe-file-stdin " ] )
2007-10-21 19:33:17 +00:00
2008-05-20 20:30:31 +00:00
d . addCallback ( run , " ln " , " tahoe-moved " , " newlink " )
d . addCallback ( run , " ls " )
d . addCallback ( _check_ls , [ " tahoe-moved " , " newlink " ] )
2008-05-20 23:56:03 +00:00
d . addCallback ( run , " cp " , " tahoe:file3 " , " tahoe:file3-copy " )
d . addCallback ( run , " ls " )
d . addCallback ( _check_ls , [ " file3 " , " file3-copy " ] )
d . addCallback ( run , " get " , " tahoe:file3-copy " )
d . addCallback ( _check_stdout_against , 3 )
# copy from disk into tahoe
d . addCallback ( run , " cp " , files [ 4 ] , " tahoe:file4 " )
d . addCallback ( run , " ls " )
d . addCallback ( _check_ls , [ " file3 " , " file3-copy " , " file4 " ] )
d . addCallback ( run , " get " , " tahoe:file4 " )
d . addCallback ( _check_stdout_against , 4 )
# copy from tahoe into disk
target_filename = os . path . join ( self . basedir , " file-out " )
d . addCallback ( run , " cp " , " tahoe:file4 " , target_filename )
def _check_cp_out ( ( out , err ) ) :
self . failUnless ( os . path . exists ( target_filename ) )
got = open ( target_filename , " rb " ) . read ( )
self . failUnlessEqual ( got , datas [ 4 ] )
d . addCallback ( _check_cp_out )
# copy from disk to disk (silly case)
target2_filename = os . path . join ( self . basedir , " file-out-copy " )
d . addCallback ( run , " cp " , target_filename , target2_filename )
def _check_cp_out2 ( ( out , err ) ) :
self . failUnless ( os . path . exists ( target2_filename ) )
got = open ( target2_filename , " rb " ) . read ( )
self . failUnlessEqual ( got , datas [ 4 ] )
d . addCallback ( _check_cp_out2 )
# copy from tahoe into disk, overwriting an existing file
d . addCallback ( run , " cp " , " tahoe:file3 " , target_filename )
def _check_cp_out3 ( ( out , err ) ) :
self . failUnless ( os . path . exists ( target_filename ) )
got = open ( target_filename , " rb " ) . read ( )
self . failUnlessEqual ( got , datas [ 3 ] )
d . addCallback ( _check_cp_out3 )
# copy from disk into tahoe, overwriting an existing immutable file
d . addCallback ( run , " cp " , files [ 5 ] , " tahoe:file4 " )
d . addCallback ( run , " ls " )
d . addCallback ( _check_ls , [ " file3 " , " file3-copy " , " file4 " ] )
d . addCallback ( run , " get " , " tahoe:file4 " )
d . addCallback ( _check_stdout_against , 5 )
# copy from disk into tahoe, overwriting an existing mutable file
d . addCallback ( run , " cp " , files [ 5 ] , " tahoe:file3 " )
d . addCallback ( run , " ls " )
d . addCallback ( _check_ls , [ " file3 " , " file3-copy " , " file4 " ] )
d . addCallback ( run , " get " , " tahoe:file3 " )
d . addCallback ( _check_stdout_against , 5 )
2008-05-21 18:49:22 +00:00
# recursive copy: setup
dn = os . path . join ( self . basedir , " dir1 " )
os . makedirs ( dn )
2008-05-22 00:35:21 +00:00
open ( os . path . join ( dn , " rfile1 " ) , " wb " ) . write ( " rfile1 " )
open ( os . path . join ( dn , " rfile2 " ) , " wb " ) . write ( " rfile2 " )
open ( os . path . join ( dn , " rfile3 " ) , " wb " ) . write ( " rfile3 " )
2008-05-21 18:49:22 +00:00
sdn2 = os . path . join ( dn , " subdir2 " )
os . makedirs ( sdn2 )
2008-05-22 00:35:21 +00:00
open ( os . path . join ( sdn2 , " rfile4 " ) , " wb " ) . write ( " rfile4 " )
open ( os . path . join ( sdn2 , " rfile5 " ) , " wb " ) . write ( " rfile5 " )
2008-05-21 18:49:22 +00:00
# from disk into tahoe
2013-08-05 15:53:44 +00:00
d . addCallback ( run , " cp " , " -r " , dn , " tahoe: " )
2008-05-22 00:35:21 +00:00
d . addCallback ( run , " ls " )
d . addCallback ( _check_ls , [ " dir1 " ] )
d . addCallback ( run , " ls " , " dir1 " )
d . addCallback ( _check_ls , [ " rfile1 " , " rfile2 " , " rfile3 " , " subdir2 " ] ,
[ " rfile4 " , " rfile5 " ] )
d . addCallback ( run , " ls " , " tahoe:dir1/subdir2 " )
d . addCallback ( _check_ls , [ " rfile4 " , " rfile5 " ] ,
[ " rfile1 " , " rfile2 " , " rfile3 " ] )
d . addCallback ( run , " get " , " dir1/subdir2/rfile4 " )
d . addCallback ( _check_stdout_against , data = " rfile4 " )
2008-05-21 18:49:22 +00:00
2008-05-22 01:36:25 +00:00
# and back out again
dn_copy = os . path . join ( self . basedir , " dir1-copy " )
d . addCallback ( run , " cp " , " --verbose " , " -r " , " tahoe:dir1 " , dn_copy )
def _check_cp_r_out ( ( out , err ) ) :
def _cmp ( name ) :
old = open ( os . path . join ( dn , name ) , " rb " ) . read ( )
2013-08-05 15:53:44 +00:00
newfn = os . path . join ( dn_copy , " dir1 " , name )
2008-05-22 01:36:25 +00:00
self . failUnless ( os . path . exists ( newfn ) )
new = open ( newfn , " rb " ) . read ( )
self . failUnlessEqual ( old , new )
_cmp ( " rfile1 " )
_cmp ( " rfile2 " )
_cmp ( " rfile3 " )
_cmp ( os . path . join ( " subdir2 " , " rfile4 " ) )
_cmp ( os . path . join ( " subdir2 " , " rfile5 " ) )
d . addCallback ( _check_cp_r_out )
2008-05-22 01:40:49 +00:00
# and copy it a second time, which ought to overwrite the same files
d . addCallback ( run , " cp " , " -r " , " tahoe:dir1 " , dn_copy )
2009-03-15 23:19:58 +00:00
# and again, only writing filecaps
dn_copy2 = os . path . join ( self . basedir , " dir1-copy-capsonly " )
d . addCallback ( run , " cp " , " -r " , " --caps-only " , " tahoe:dir1 " , dn_copy2 )
def _check_capsonly ( ( out , err ) ) :
# these should all be LITs
2013-08-05 15:53:44 +00:00
x = open ( os . path . join ( dn_copy2 , " dir1 " , " subdir2 " , " rfile4 " ) ) . read ( )
2009-03-15 23:19:58 +00:00
y = uri . from_string_filenode ( x )
self . failUnlessEqual ( y . data , " rfile4 " )
d . addCallback ( _check_capsonly )
2008-05-22 01:40:49 +00:00
# and tahoe-to-tahoe
d . addCallback ( run , " cp " , " -r " , " tahoe:dir1 " , " tahoe:dir1-copy " )
d . addCallback ( run , " ls " )
d . addCallback ( _check_ls , [ " dir1 " , " dir1-copy " ] )
2013-08-05 15:53:44 +00:00
d . addCallback ( run , " ls " , " dir1-copy/dir1 " )
2008-05-22 01:40:49 +00:00
d . addCallback ( _check_ls , [ " rfile1 " , " rfile2 " , " rfile3 " , " subdir2 " ] ,
[ " rfile4 " , " rfile5 " ] )
2013-08-05 15:53:44 +00:00
d . addCallback ( run , " ls " , " tahoe:dir1-copy/dir1/subdir2 " )
2008-05-22 01:40:49 +00:00
d . addCallback ( _check_ls , [ " rfile4 " , " rfile5 " ] ,
[ " rfile1 " , " rfile2 " , " rfile3 " ] )
2013-08-05 15:53:44 +00:00
d . addCallback ( run , " get " , " dir1-copy/dir1/subdir2/rfile4 " )
2008-05-22 01:40:49 +00:00
d . addCallback ( _check_stdout_against , data = " rfile4 " )
# and copy it a second time, which ought to overwrite the same files
d . addCallback ( run , " cp " , " -r " , " tahoe:dir1 " , " tahoe:dir1-copy " )
2007-10-21 19:33:17 +00:00
# tahoe_ls doesn't currently handle the error correctly: it tries to
# JSON-parse a traceback.
## def _ls_missing(res):
2012-06-18 17:43:49 +00:00
## argv = nodeargs + ["ls", "bogus"]
2007-10-21 19:33:17 +00:00
## return self._run_cli(argv)
## d.addCallback(_ls_missing)
## def _check_ls_missing((out,err)):
## print "OUT", out
## print "ERR", err
## self.failUnlessEqual(err, "")
## d.addCallback(_check_ls_missing)
2007-10-12 02:20:41 +00:00
return d
2011-01-20 04:32:38 +00:00
def test_filesystem_with_cli_in_subprocess ( self ) :
# We do this in a separate test so that test_filesystem doesn't skip if we can't run bin/tahoe.
self . basedir = " system/SystemTest/test_filesystem_with_cli_in_subprocess "
d = self . set_up_nodes ( )
def _new_happy_semantics ( ign ) :
for c in self . clients :
2014-04-21 21:40:28 +00:00
c . encoding_params [ ' happy ' ] = 1
2011-01-20 04:32:38 +00:00
d . addCallback ( _new_happy_semantics )
def _run_in_subprocess ( ignored , verb , * args , * * kwargs ) :
stdin = kwargs . get ( " stdin " )
env = kwargs . get ( " env " )
2012-06-18 17:43:49 +00:00
newargs = [ " --node-directory " , self . getdir ( " client0 " ) , verb ] + list ( args )
2011-01-20 04:32:38 +00:00
return self . run_bintahoe ( newargs , stdin = stdin , env = env )
def _check_succeeded ( res , check_stderr = True ) :
out , err , rc_or_sig = res
self . failUnlessEqual ( rc_or_sig , 0 , str ( res ) )
if check_stderr :
self . failUnlessEqual ( err , " " )
d . addCallback ( _run_in_subprocess , " create-alias " , " newalias " )
d . addCallback ( _check_succeeded )
STDIN_DATA = " This is the file to upload from stdin. "
d . addCallback ( _run_in_subprocess , " put " , " - " , " newalias:tahoe-file " , stdin = STDIN_DATA )
d . addCallback ( _check_succeeded , check_stderr = False )
def _mv_with_http_proxy ( ign ) :
env = os . environ
env [ ' http_proxy ' ] = env [ ' HTTP_PROXY ' ] = " http://127.0.0.0:12345 " # invalid address
return _run_in_subprocess ( None , " mv " , " newalias:tahoe-file " , " newalias:tahoe-moved " , env = env )
d . addCallback ( _mv_with_http_proxy )
d . addCallback ( _check_succeeded )
d . addCallback ( _run_in_subprocess , " ls " , " newalias: " )
def _check_ls ( res ) :
out , err , rc_or_sig = res
self . failUnlessEqual ( rc_or_sig , 0 , str ( res ) )
self . failUnlessEqual ( err , " " , str ( res ) )
self . failUnlessIn ( " tahoe-moved " , out )
self . failIfIn ( " tahoe-file " , out )
d . addCallback ( _check_ls )
return d
2007-10-15 23:16:39 +00:00
def _test_checker ( self , res ) :
2008-07-16 00:23:25 +00:00
ut = upload . Data ( " too big to be literal " * 200 , convergence = None )
d = self . _personal_node . add_file ( u " big file " , ut )
2008-10-22 08:38:18 +00:00
d . addCallback ( lambda res : self . _personal_node . check ( Monitor ( ) ) )
2008-07-16 00:23:25 +00:00
def _check_dirnode_results ( r ) :
self . failUnless ( r . is_healthy ( ) )
d . addCallback ( _check_dirnode_results )
2008-10-22 08:38:18 +00:00
d . addCallback ( lambda res : self . _personal_node . check ( Monitor ( ) , verify = True ) )
2008-07-16 00:23:25 +00:00
d . addCallback ( _check_dirnode_results )
d . addCallback ( lambda res : self . _personal_node . get ( u " big file " ) )
def _got_chk_filenode ( n ) :
2009-11-20 07:22:39 +00:00
self . failUnless ( isinstance ( n , ImmutableFileNode ) )
2008-10-22 08:38:18 +00:00
d = n . check ( Monitor ( ) )
2008-07-16 00:23:25 +00:00
def _check_filenode_results ( r ) :
self . failUnless ( r . is_healthy ( ) )
d . addCallback ( _check_filenode_results )
2008-10-22 08:38:18 +00:00
d . addCallback ( lambda res : n . check ( Monitor ( ) , verify = True ) )
2008-07-16 00:23:25 +00:00
d . addCallback ( _check_filenode_results )
return d
d . addCallback ( _got_chk_filenode )
d . addCallback ( lambda res : self . _personal_node . get ( u " sekrit data " ) )
def _got_lit_filenode ( n ) :
2009-11-20 07:22:39 +00:00
self . failUnless ( isinstance ( n , LiteralFileNode ) )
2008-10-22 08:38:18 +00:00
d = n . check ( Monitor ( ) )
2008-09-07 19:44:56 +00:00
def _check_lit_filenode_results ( r ) :
self . failUnlessEqual ( r , None )
d . addCallback ( _check_lit_filenode_results )
2008-10-22 08:38:18 +00:00
d . addCallback ( lambda res : n . check ( Monitor ( ) , verify = True ) )
2008-09-07 19:44:56 +00:00
d . addCallback ( _check_lit_filenode_results )
2008-07-16 00:23:25 +00:00
return d
d . addCallback ( _got_lit_filenode )
2007-10-16 19:25:09 +00:00
return d
2012-06-15 01:48:55 +00:00
2012-06-16 18:18:44 +00:00
2012-06-15 01:48:55 +00:00
class Connections ( SystemTestMixin , unittest . TestCase ) :
def test_rref ( self ) :
self . basedir = " system/Connections/rref "
d = self . set_up_nodes ( 2 )
def _start ( ign ) :
self . c0 = self . clients [ 0 ]
2012-06-16 18:18:44 +00:00
nonclients = [ s for s in self . c0 . storage_broker . get_connected_servers ( )
make IServer.get_serverid() use pubkey, not tubid
This is a change I've wanted to make for many years, because when we get
to HTTP-based servers, we won't have tubids for them. What held me back
was that there's code all over the place that uses the serverid for
various purposes, so I wasn't sure it was safe. I did a big push a few
years ago to use IServer instances instead of serverids in most
places (in #1363), and to split out the values that actually depend upon
tubid into separate accessors (like get_lease_seed and
get_foolscap_write_enabler_seed), which I think took care of all the
important uses.
There are a number of places that use get_serverid() as dictionary key
to track shares (Checker results, mutable servermap). I believe these
are happy to use pubkeys instead of tubids: the only thing they do with
get_serverid() is to compare it to other values obtained from
get_serverid(). A few places in the WUI used serverid to compute display
values: these were fixed.
The main trouble was the Helper: it returns a HelperUploadResults (a
Copyable) with a share->server mapping that's keyed by whatever the
Helper's get_serverid() returns. If the uploader and the helper are on
different sides of this change, the Helper could return values that the
uploader won't recognize. This is cosmetic: that mapping is only used to
display the upload results on the "Recent and Active Operations" page.
I've added code to StorageFarmBroker.get_stub_server() to fall back to
tubids when looking up a server, so this should still work correctly
when the uploader is new and the Helper is old. If the Helper is new and
the uploader is old, the upload results will show unusual server ids.
refs ticket:1363
2016-08-26 19:16:17 +00:00
if s . get_serverid ( ) != self . c0 . get_long_nodeid ( ) ]
2012-06-16 18:18:44 +00:00
self . failUnlessEqual ( len ( nonclients ) , 1 )
self . s1 = nonclients [ 0 ] # s1 is the server, not c0
self . s1_rref = self . s1 . get_rref ( )
2012-06-15 01:48:55 +00:00
self . failIfEqual ( self . s1_rref , None )
self . failUnless ( self . s1 . is_connected ( ) )
d . addCallback ( _start )
# now shut down the server
d . addCallback ( lambda ign : self . clients [ 1 ] . disownServiceParent ( ) )
# and wait for the client to notice
def _poll ( ) :
return len ( self . c0 . storage_broker . get_connected_servers ( ) ) < 2
d . addCallback ( lambda ign : self . poll ( _poll ) )
def _down ( ign ) :
self . failIf ( self . s1 . is_connected ( ) )
rref = self . s1 . get_rref ( )
self . failUnless ( rref )
self . failUnlessIdentical ( rref , self . s1_rref )
d . addCallback ( _down )
return d