120 files changed, 7765 insertions, 3544 deletions
@@ -57,6 +57,9 @@ devtools:
 	@type "solc" 2> /dev/null || echo 'Please install solc'
 	@type "protoc" 2> /dev/null || echo 'Please install protoc'
 
+swarm-devtools:
+	env GOBIN= go install ./cmd/swarm/mimegen
+
 # Cross Compilation Targets (xgo)
 
 geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios
diff --git a/accounts/abi/abi.go b/accounts/abi/abi.go
index 254b1f7fb..535e5d78b 100644
--- a/accounts/abi/abi.go
+++ b/accounts/abi/abi.go
@@ -137,6 +137,9 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
 // MethodById looks up a method by the 4-byte id
 // returns nil if none found
 func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
+	if len(sigdata) < 4 {
+		return nil, fmt.Errorf("data too short (%d bytes) for abi method lookup", len(sigdata))
+	}
 	for _, method := range abi.Methods {
 		if bytes.Equal(method.Id(), sigdata[:4]) {
 			return &method, nil
diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go
index 8018df775..59ba79cb6 100644
--- a/accounts/abi/abi_test.go
+++ b/accounts/abi/abi_test.go
@@ -711,5 +711,14 @@ func TestABI_MethodById(t *testing.T) {
 			t.Errorf("Method %v (id %v) not 'findable' by id in ABI", name, common.ToHex(m.Id()))
 		}
 	}
-
+	// Also test empty
+	if _, err := abi.MethodById([]byte{0x00}); err == nil {
+		t.Errorf("Expected error, too short to decode data")
+	}
+	if _, err := abi.MethodById([]byte{}); err == nil {
+		t.Errorf("Expected error, too short to decode data")
+	}
+	if _, err := abi.MethodById(nil); err == nil {
+		t.Errorf("Expected error, nil is too short to decode data")
+	}
 }
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index fc0ccbf52..6f46fc149 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -208,7 +208,7 @@ func (b *SimulatedBackend) PendingNonceAt(ctx context.Context, account common.Ad
 }
 
 // SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated
-// chain doens't have miners, we just return a gas price of 1 for any call.
+// chain doesn't have miners, we just return a gas price of 1 for any call.
 func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
 	return big.NewInt(1), nil
 }
diff --git a/build/ci.go b/build/ci.go
index 40252cbde..c5a107e1d 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -1040,7 +1040,7 @@ func xgoTool(args []string) *exec.Cmd {
 func doPurge(cmdline []string) {
 	var (
 		store = flag.String("store", "", `Destination from where to purge archives (usually "gethstore/builds")`)
-		limit = flag.Int("days", 30, `Age threshold above which to delete unstalbe archives`)
+		limit = flag.Int("days", 30, `Age threshold above which to delete unstable archives`)
 	)
 	flag.CommandLine.Parse(cmdline)
diff --git a/cmd/clef/intapi_changelog.md b/cmd/clef/intapi_changelog.md
index 7d2a897ea..9e13f67d0 100644
--- a/cmd/clef/intapi_changelog.md
+++ b/cmd/clef/intapi_changelog.md
@@ -1,5 +1,21 @@
 ### Changelog for internal API (ui-api)
+### 2.1.0
+
+* Add `OnInputRequired(info UserInputRequest)` to internal API. This method is used when Clef needs user input, e.g. passwords.
+
+The following structures are used:
+```golang
+	UserInputRequest struct {
+		Prompt string `json:"prompt"`
+		Title string `json:"title"`
+		IsPassword bool `json:"isPassword"`
+	}
+	UserInputResponse struct {
+		Text string `json:"text"`
+	}
+```
+
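(Editorial aside, not part of the patch.) A minimal, self-contained sketch of how a UI implementation might satisfy the new callback. The request/response structures are mirrored from the changelog entry above; the `exampleUI` type and the `(UserInputResponse, error)` return shape are assumptions, not something the changelog states.

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// Structures mirrored from the changelog entry above.
type UserInputRequest struct {
	Prompt     string `json:"prompt"`
	Title      string `json:"title"`
	IsPassword bool   `json:"isPassword"`
}

type UserInputResponse struct {
	Text string `json:"text"`
}

// exampleUI is a hypothetical stand-in for a Clef UI implementation.
type exampleUI struct{}

func (ui *exampleUI) OnInputRequired(info UserInputRequest) (UserInputResponse, error) {
	fmt.Printf("%s\n%s: ", info.Title, info.Prompt)
	line, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		return UserInputResponse{}, err
	}
	// Note: a real UI would suppress terminal echo when info.IsPassword is true.
	return UserInputResponse{Text: strings.TrimSpace(line)}, nil
}

func main() {
	resp, err := (&exampleUI{}).OnInputRequired(UserInputRequest{
		Title:      "Unlock account",
		Prompt:     "Password",
		IsPassword: true,
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("received %d characters of input\n", len(resp.Text))
}
```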
 ### 2.0.0
 
 * Modify how `call_info` on a transaction is conveyed. New format:
diff --git a/cmd/swarm/access.go b/cmd/swarm/access.go
index 67e852dde..dd2d513c2 100644
--- a/cmd/swarm/access.go
+++ b/cmd/swarm/access.go
@@ -130,7 +130,7 @@ func accessNewACT(ctx *cli.Context) {
 		if err != nil {
 			utils.Fatalf("had an error reading the grantee public key list")
 		}
-		pkGrantees = strings.Split(string(bytes), "\n")
+		pkGrantees = strings.Split(strings.Trim(string(bytes), "\n"), "\n")
 	}
 
 	if passGranteesFilename != "" {
@@ -138,7 +138,7 @@ func accessNewACT(ctx *cli.Context) {
 		if err != nil {
 			utils.Fatalf("could not read password filename: %v", err)
 		}
-		passGrantees = strings.Split(string(bytes), "\n")
+		passGrantees = strings.Split(strings.Trim(string(bytes), "\n"), "\n")
 	}
 	accessKey, ae, actManifest, err = api.DoACT(ctx, privateKey, salt, pkGrantees, passGrantees)
 	if err != nil {
diff --git a/cmd/swarm/config.go b/cmd/swarm/config.go
index e623de8bb..16001010d 100644
--- a/cmd/swarm/config.go
+++ b/cmd/swarm/config.go
@@ -59,27 +59,28 @@ var (
 
 //constants for environment variables
 const (
-	SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR"
-	SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT"
-	SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR"
-	SWARM_ENV_PORT = "SWARM_PORT"
-	SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID"
-	SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE"
-	SWARM_ENV_SWAP_API = "SWARM_SWAP_API"
-	SWARM_ENV_SYNC_DISABLE = "SWARM_SYNC_DISABLE"
-	SWARM_ENV_SYNC_UPDATE_DELAY = "SWARM_ENV_SYNC_UPDATE_DELAY"
-	SWARM_ENV_LIGHT_NODE_ENABLE = "SWARM_LIGHT_NODE_ENABLE"
-	SWARM_ENV_DELIVERY_SKIP_CHECK = "SWARM_DELIVERY_SKIP_CHECK"
-	SWARM_ENV_ENS_API = "SWARM_ENS_API"
-	SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR"
-	SWARM_ENV_CORS = "SWARM_CORS"
-	SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES"
-	SWARM_ENV_PSS_ENABLE = "SWARM_PSS_ENABLE"
-	SWARM_ENV_STORE_PATH = "SWARM_STORE_PATH"
-	SWARM_ENV_STORE_CAPACITY = "SWARM_STORE_CAPACITY"
-	SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY"
-	SWARM_ACCESS_PASSWORD = "SWARM_ACCESS_PASSWORD"
-	GETH_ENV_DATADIR = "GETH_DATADIR"
+	SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR"
+	SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT"
+	SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR"
+	SWARM_ENV_PORT = "SWARM_PORT"
+	SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID"
+	SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE"
+	SWARM_ENV_SWAP_API = "SWARM_SWAP_API"
+	SWARM_ENV_SYNC_DISABLE = "SWARM_SYNC_DISABLE"
+	SWARM_ENV_SYNC_UPDATE_DELAY = "SWARM_ENV_SYNC_UPDATE_DELAY"
+	SWARM_ENV_MAX_STREAM_PEER_SERVERS = "SWARM_ENV_MAX_STREAM_PEER_SERVERS"
+	SWARM_ENV_LIGHT_NODE_ENABLE = "SWARM_LIGHT_NODE_ENABLE"
+	SWARM_ENV_DELIVERY_SKIP_CHECK = "SWARM_DELIVERY_SKIP_CHECK"
+	SWARM_ENV_ENS_API = "SWARM_ENS_API"
+	SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR"
+	SWARM_ENV_CORS = "SWARM_CORS"
+	SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES"
+	SWARM_ENV_PSS_ENABLE = "SWARM_PSS_ENABLE"
+	SWARM_ENV_STORE_PATH = "SWARM_STORE_PATH"
+	SWARM_ENV_STORE_CAPACITY = "SWARM_STORE_CAPACITY"
+	SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY"
+	SWARM_ACCESS_PASSWORD = "SWARM_ACCESS_PASSWORD"
+	GETH_ENV_DATADIR = "GETH_DATADIR"
 )
 
 // These settings ensure that TOML keys use the same names as Go struct fields.
@@ -211,6 +212,9 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
 		currentConfig.SyncUpdateDelay = d
 	}
 
+	// any value including 0 is acceptable
+	currentConfig.MaxStreamPeerServers = ctx.GlobalInt(SwarmMaxStreamPeerServersFlag.Name)
+
 	if ctx.GlobalIsSet(SwarmLightNodeEnabled.Name) {
 		currentConfig.LightNodeEnabled = true
 	}
@@ -323,6 +327,14 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
 		currentConfig.SyncUpdateDelay = d
 	}
 
+	if max := os.Getenv(SWARM_ENV_MAX_STREAM_PEER_SERVERS); max != "" {
+		m, err := strconv.Atoi(max)
+		if err != nil {
+			utils.Fatalf("invalid environment variable %s: %v", SWARM_ENV_MAX_STREAM_PEER_SERVERS, err)
+		}
+		currentConfig.MaxStreamPeerServers = m
+	}
+
 	if lne := os.Getenv(SWARM_ENV_LIGHT_NODE_ENABLE); lne != "" {
 		lightnode, err := strconv.ParseBool(lne)
 		if err != nil {
diff --git a/cmd/swarm/export_test.go b/cmd/swarm/export_test.go
index 525538ad7..20df7b060 100644
--- a/cmd/swarm/export_test.go
+++ b/cmd/swarm/export_test.go
@@ -14,6 +14,8 @@
 // You should have received a copy of the GNU General Public License
 // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
 
+// +build !windows
+
 package main
 
 import (
diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go
index 4c9ce931e..5acf87c71 100644
--- a/cmd/swarm/main.go
+++ b/cmd/swarm/main.go
@@ -116,6 +116,12 @@ var (
 		Usage: "Duration for sync subscriptions update after no new peers are added (default 15s)",
 		EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY,
 	}
+	SwarmMaxStreamPeerServersFlag = cli.IntFlag{
+		Name: "max-stream-peer-servers",
+		Usage: "Limit of Stream peer servers, 0 denotes unlimited",
+		EnvVar: SWARM_ENV_MAX_STREAM_PEER_SERVERS,
+		Value: 10000, // A very large default value is possible as stream servers have very small memory footprint
+	}
 	SwarmLightNodeEnabled = cli.BoolFlag{
 		Name: "lightnode",
 		Usage: "Enable Swarm LightNode (default false)",
@@ -197,21 +203,29 @@ var (
 		Usage: "Number of recent chunks cached in memory (default 5000)",
 		EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY,
 	}
-	SwarmResourceMultihashFlag = cli.BoolFlag{
-		Name: "multihash",
-		Usage: "Determines how to interpret data for a resource update. If not present, data will be interpreted as raw, literal data that will be included in the resource",
+	SwarmCompressedFlag = cli.BoolFlag{
+		Name: "compressed",
+		Usage: "Prints encryption keys in compressed form",
 	}
 	SwarmResourceNameFlag = cli.StringFlag{
 		Name: "name",
-		Usage: "User-defined name for the new resource",
+		Usage: "User-defined name for the new resource, limited to 32 characters. If combined with topic, the resource will be a subtopic with this name",
+	}
+	SwarmResourceTopicFlag = cli.StringFlag{
+		Name: "topic",
+		Usage: "User-defined topic this resource is tracking, hex encoded. Limited to 64 hexadecimal characters",
 	}
 	SwarmResourceDataOnCreateFlag = cli.StringFlag{
 		Name: "data",
 		Usage: "Initializes the resource with the given hex-encoded data. Data must be prefixed by 0x",
Data must be prefixed by 0x", } - SwarmCompressedFlag = cli.BoolFlag{ - Name: "compressed", - Usage: "Prints encryption keys in compressed form", + SwarmResourceManifestFlag = cli.StringFlag{ + Name: "manifest", + Usage: "Refers to the resource through a manifest", + } + SwarmResourceUserFlag = cli.StringFlag{ + Name: "user", + Usage: "Indicates the user who updates the resource", } ) @@ -242,12 +256,12 @@ func init() { utils.ListenPortFlag.Value = 30399 } -var app = utils.NewApp(gitCommit, "Ethereum Swarm") +var app = utils.NewApp("", "Ethereum Swarm") // This init function creates the cli.App. func init() { app.Action = bzzd - app.HideVersion = true // we have a command to print the version + app.Version = sv.ArchiveVersion(gitCommit) app.Copyright = "Copyright 2013-2016 The go-ethereum Authors" app.Commands = []cli.Command{ { @@ -341,27 +355,53 @@ func init() { Action: resourceCreate, CustomHelpTemplate: helpTemplate, Name: "create", - Usage: "creates a new Mutable Resource", - ArgsUsage: "<frequency>", - Description: "creates a new Mutable Resource", - Flags: []cli.Flag{SwarmResourceNameFlag, SwarmResourceDataOnCreateFlag, SwarmResourceMultihashFlag}, + Usage: "creates and publishes a new Mutable Resource manifest", + Description: `creates and publishes a new Mutable Resource manifest pointing to a specified user's updates about a particular topic. + The resource topic can be built in the following ways: + * use --topic to set the topic to an arbitrary binary hex string. + * use --name to set the topic to a human-readable name. + For example --name could be set to "profile-picture", meaning this Mutable Resource allows to get this user's current profile picture. + * use both --topic and --name to create named subtopics. + For example, --topic could be set to an Ethereum contract address and --name could be set to "comments", meaning + the Mutable Resource tracks a discussion about that contract. + The --user flag allows to have this manifest refer to a user other than yourself. If not specified, + it will then default to your local account (--bzzaccount)`, + Flags: []cli.Flag{SwarmResourceNameFlag, SwarmResourceTopicFlag, SwarmResourceUserFlag}, }, { Action: resourceUpdate, CustomHelpTemplate: helpTemplate, Name: "update", Usage: "updates the content of an existing Mutable Resource", - ArgsUsage: "<Manifest Address or ENS domain> <0x Hex data>", - Description: "updates the content of an existing Mutable Resource", - Flags: []cli.Flag{SwarmResourceMultihashFlag}, + ArgsUsage: "<0x Hex data>", + Description: `publishes a new update on the specified topic + The resource topic can be built in the following ways: + * use --topic to set the topic to an arbitrary binary hex string. + * use --name to set the topic to a human-readable name. + For example --name could be set to "profile-picture", meaning this Mutable Resource allows to get this user's current profile picture. + * use both --topic and --name to create named subtopics. + For example, --topic could be set to an Ethereum contract address and --name could be set to "comments", meaning + the Mutable Resource tracks a discussion about that contract. 
+
+					If you have a manifest, you can specify it with --manifest to refer to the resource,
+					instead of using --topic / --name
+					`,
+					Flags: []cli.Flag{SwarmResourceManifestFlag, SwarmResourceNameFlag, SwarmResourceTopicFlag},
 				},
 				{
 					Action: resourceInfo,
 					CustomHelpTemplate: helpTemplate,
 					Name: "info",
 					Usage: "obtains information about an existing Mutable Resource",
-					ArgsUsage: "<Manifest Address or ENS domain>",
-					Description: "obtains information about an existing Mutable Resource",
+					Description: `obtains information about an existing Mutable Resource
+					The topic can be specified directly with the --topic flag as an hex string
+					If no topic is specified, the default topic (zero) will be used
+					The --name flag can be used to specify subtopics with a specific name.
+					The --user flag allows to refer to a user other than yourself. If not specified,
+					it will then default to your local account (--bzzaccount)
+					If you have a manifest, you can specify it with --manifest instead of --topic / --name / --user
+					to refer to the resource`,
+					Flags: []cli.Flag{SwarmResourceManifestFlag, SwarmResourceNameFlag, SwarmResourceTopicFlag, SwarmResourceUserFlag},
 				},
 			},
 		},
@@ -542,6 +582,7 @@ pv(1) tool to get a progress bar:
 		SwarmSwapAPIFlag,
 		SwarmSyncDisabledFlag,
 		SwarmSyncUpdateDelay,
+		SwarmMaxStreamPeerServersFlag,
 		SwarmLightNodeEnabled,
 		SwarmDeliverySkipCheckFlag,
 		SwarmListenAddrFlag,
diff --git a/cmd/swarm/mimegen/generator.go b/cmd/swarm/mimegen/generator.go
new file mode 100644
index 000000000..68f9e306e
--- /dev/null
+++ b/cmd/swarm/mimegen/generator.go
@@ -0,0 +1,124 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+package main
+
+// The standard "mime" package relies on system settings, see mime.osInitMime
+// Swarm will run on many OS/platform/Docker combinations and must behave similarly on all of them
+// This command generates code to add common mime types based on a mime.types file
+//
+// mime.types file provided by mailcap, which follows https://www.iana.org/assignments/media-types/media-types.xhtml
+//
+// Get the latest version of the mime.types file by:
+// docker run --rm -v $(pwd):/tmp alpine:edge /bin/sh -c "apk add -U mailcap; mv /etc/mime.types /tmp"
+
+import (
+	"bufio"
+	"bytes"
+	"flag"
+	"html/template"
+	"io/ioutil"
+	"strings"
+
+	"log"
+)
+
+var (
+	typesFlag = flag.String("types", "", "Input mime.types file")
+	packageFlag = flag.String("package", "", "Golang package in output file")
+	outFlag = flag.String("out", "", "Output file name for the generated mime types")
+)
+
+type mime struct {
+	Name string
+	Exts []string
+}
+
+type templateParams struct {
+	PackageName string
+	Mimes []mime
+}
+
+func main() {
+	// Parse and ensure all needed inputs are specified
+	flag.Parse()
+	if *typesFlag == "" {
+		log.Fatalf("--types is required")
+	}
+	if *packageFlag == "" {
+		log.Fatalf("--package is required")
+	}
+	if *outFlag == "" {
+		log.Fatalf("--out is required")
+	}
+
+	params := templateParams{
+		PackageName: *packageFlag,
+	}
+
+	types, err := ioutil.ReadFile(*typesFlag)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	scanner := bufio.NewScanner(bytes.NewReader(types))
+	for scanner.Scan() {
+		txt := scanner.Text()
+		if strings.HasPrefix(txt, "#") || len(txt) == 0 {
+			continue
+		}
+		parts := strings.Fields(txt)
+		if len(parts) == 1 {
+			continue
+		}
+		params.Mimes = append(params.Mimes, mime{parts[0], parts[1:]})
+	}
+
+	if err = scanner.Err(); err != nil {
+		log.Fatal(err)
+	}
+
+	result := bytes.NewBuffer([]byte{})
+
+	if err := template.Must(template.New("_").Parse(tpl)).Execute(result, params); err != nil {
+		log.Fatal(err)
+	}
+
+	if err := ioutil.WriteFile(*outFlag, result.Bytes(), 0600); err != nil {
+		log.Fatal(err)
+	}
+}
+
+var tpl = `// Code generated by github.com/ethereum/go-ethereum/cmd/swarm/mimegen. DO NOT EDIT.
+
+package {{ .PackageName }}
+
+import "mime"
+func init() {
+	var mimeTypes = map[string]string{
+{{- range .Mimes -}}
+	{{ $name := .Name -}}
+	{{- range .Exts }}
+		".{{ . }}": "{{ $name | html }}",
+	{{- end }}
+{{- end }}
+	}
+	for ext, name := range mimeTypes {
+		if err := mime.AddExtensionType(ext, name); err != nil {
+			panic(err)
+		}
+	}
+}
+`
diff --git a/cmd/swarm/mimegen/mime.types b/cmd/swarm/mimegen/mime.types
new file mode 100644
index 000000000..1bdf21149
--- /dev/null
+++ b/cmd/swarm/mimegen/mime.types
@@ -0,0 +1,1828 @@
+# This is a comment. I love comments. -*- indent-tabs-mode: t -*-
+
+# This file controls what Internet media types are sent to the client for
+# given file extension(s). Sending the correct media type to the client
+# is important so they know how to handle the content of the file.
+# Extra types can either be added here or by using an AddType directive
+# in your config files. For more information about Internet media types,
+# please read RFC 2045, 2046, 2047, 2048, and 2077. The Internet media type
+# registry is at <http://www.iana.org/assignments/media-types/>.
+ +# IANA types + +# MIME type Extensions +application/1d-interleaved-parityfec +application/3gpdash-qoe-report+xml +application/3gpp-ims+xml +application/A2L a2l +application/activemessage +application/alto-costmap+json +application/alto-costmapfilter+json +application/alto-directory+json +application/alto-endpointcost+json +application/alto-endpointcostparams+json +application/alto-endpointprop+json +application/alto-endpointpropparams+json +application/alto-error+json +application/alto-networkmap+json +application/alto-networkmapfilter+json +application/AML aml +application/andrew-inset ez +application/applefile +application/ATF atf +application/ATFX atfx +application/ATXML atxml +application/atom+xml atom +application/atomcat+xml atomcat +application/atomdeleted+xml atomdeleted +application/atomicmail +application/atomsvc+xml atomsvc +application/auth-policy+xml apxml +application/bacnet-xdd+zip xdd +application/batch-SMTP +application/beep+xml +application/calendar+json +application/calendar+xml xcs +application/call-completion +application/cals-1840 +application/cbor cbor +application/ccmp+xml ccmp +application/ccxml+xml ccxml +application/CDFX+XML cdfx +application/cdmi-capability cdmia +application/cdmi-container cdmic +application/cdmi-domain cdmid +application/cdmi-object cdmio +application/cdmi-queue cdmiq +application/cdni +application/CEA cea +application/cea-2018+xml +application/cellml+xml cellml cml +application/cfw +application/clue_info+xml clue +application/cms cmsc +application/cnrp+xml +application/coap-group+json +application/coap-payload +application/commonground +application/conference-info+xml +application/cpl+xml cpl +application/cose +application/cose-key +application/cose-key-set +application/csrattrs csrattrs +application/csta+xml +application/CSTAdata+xml +application/csvm+json +application/cybercash +application/dash+xml mpd +application/dashdelta mpdd +application/davmount+xml davmount +application/dca-rft +application/DCD dcd +application/dec-dx +application/dialog-info+xml +application/dicom dcm +application/dicom+json +application/dicom+xml +application/DII dii +application/DIT dit +application/dns +application/dskpp+xml xmls +application/dssc+der dssc +application/dssc+xml xdssc +application/dvcs dvc +application/ecmascript es +application/EDI-Consent +application/EDI-X12 +application/EDIFACT +application/efi efi +application/EmergencyCallData.Comment+xml +application/EmergencyCallData.Control+xml +application/EmergencyCallData.DeviceInfo+xml +application/EmergencyCallData.eCall.MSD +application/EmergencyCallData.ProviderInfo+xml +application/EmergencyCallData.ServiceInfo+xml +application/EmergencyCallData.SubscriberInfo+xml +application/EmergencyCallData.VEDS+xml +application/emma+xml emma +application/emotionml+xml emotionml +application/encaprtp +application/epp+xml +application/epub+zip epub +application/eshop +application/exi exi +application/fastinfoset finf +application/fastsoap +application/fdt+xml fdt +# fits, fit, fts: image/fits +application/fits +# application/font-sfnt deprecated in favor of font/sfnt +application/font-tdpfr pfr +# application/font-woff deprecated in favor of font/woff +application/framework-attributes+xml +application/geo+json geojson +application/geo+json-seq +application/gml+xml gml +application/gzip gz tgz +application/H224 +application/held+xml +application/http +application/hyperstudio stk +application/ibe-key-request+xml +application/ibe-pkg-reply+xml +application/ibe-pp-data +application/iges 
+application/im-iscomposing+xml +application/index +application/index.cmd +application/index.obj +application/index.response +application/index.vnd +application/inkml+xml ink inkml +application/iotp +application/ipfix ipfix +application/ipp +application/isup +application/its+xml its +application/javascript js +application/jose +application/jose+json +application/jrd+json jrd +application/json json +application/json-patch+json json-patch +application/json-seq +application/jwk+json +application/jwk-set+json +application/jwt +application/kpml-request+xml +application/kpml-response+xml +application/ld+json jsonld +application/lgr+xml lgr +application/link-format wlnk +application/load-control+xml +application/lost+xml lostxml +application/lostsync+xml lostsyncxml +application/LXF lxf +application/mac-binhex40 hqx +application/macwriteii +application/mads+xml mads +application/marc mrc +application/marcxml+xml mrcx +application/mathematica nb ma mb +application/mathml-content+xml +application/mathml-presentation+xml +application/mathml+xml mml +application/mbms-associated-procedure-description+xml +application/mbms-deregister+xml +application/mbms-envelope+xml +application/mbms-msk-response+xml +application/mbms-msk+xml +application/mbms-protection-description+xml +application/mbms-reception-report+xml +application/mbms-register-response+xml +application/mbms-register+xml +application/mbms-schedule+xml +application/mbms-user-service-description+xml +application/mbox mbox +application/media_control+xml +# mpf: text/vnd.ms-mediapackage +application/media-policy-dataset+xml +application/mediaservercontrol+xml +application/merge-patch+json +application/metalink4+xml meta4 +application/mets+xml mets +application/MF4 mf4 +application/mikey +application/mods+xml mods +application/moss-keys +application/moss-signature +application/mosskey-data +application/mosskey-request +application/mp21 m21 mp21 +# mp4, mpg4: video/mp4, see RFC 4337 +application/mp4 +application/mpeg4-generic +application/mpeg4-iod +application/mpeg4-iod-xmt +# xdf: application/xcap-diff+xml +application/mrb-consumer+xml +application/mrb-publish+xml +application/msc-ivr+xml +application/msc-mixer+xml +application/msword doc +application/mud+json +application/mxf mxf +application/n-quads nq +application/n-triples nt +application/nasdata +application/news-checkgroups +application/news-groupinfo +application/news-transmission +application/nlsml+xml +application/nss +application/ocsp-request orq +application/ocsp-response ors +application/octet-stream bin lha lzh exe class so dll img iso +application/oda oda +application/ODX odx +application/oebps-package+xml opf +application/ogg ogx +application/oxps oxps +application/p2p-overlay+xml relo +application/parityfec +# xer: application/xcap-error+xml +application/patch-ops-error+xml +application/pdf pdf +application/PDX pdx +application/pgp-encrypted pgp +application/pgp-keys +application/pgp-signature sig +application/pidf-diff+xml +application/pidf+xml +application/pkcs10 p10 +application/pkcs12 p12 pfx +application/pkcs7-mime p7m p7c +application/pkcs7-signature p7s +application/pkcs8 p8 +# ac: application/vnd.nokia.n-gage.ac+xml +application/pkix-attr-cert +application/pkix-cert cer +application/pkix-crl crl +application/pkix-pkipath pkipath +application/pkixcmp pki +application/pls+xml pls +application/poc-settings+xml +application/postscript ps eps ai +application/ppsp-tracker+json +application/problem+json +application/problem+xml +application/provenance+xml provx 
+application/prs.alvestrand.titrax-sheet +application/prs.cww cw cww +application/prs.hpub+zip hpub +application/prs.nprend rnd rct +application/prs.plucker +application/prs.rdf-xml-crypt rdf-crypt +application/prs.xsf+xml xsf +application/pskc+xml pskcxml +application/qsig +application/raptorfec +application/rdap+json +application/rdf+xml rdf +application/reginfo+xml rif +application/relax-ng-compact-syntax rnc +application/remote-printing +application/reputon+json +application/resource-lists-diff+xml rld +application/resource-lists+xml rl +application/rfc+xml rfcxml +application/riscos +application/rlmi+xml +application/rls-services+xml rs +application/rpki-ghostbusters gbr +application/rpki-manifest mft +application/rpki-publication +application/rpki-roa roa +application/rpki-updown +application/rtf rtf +application/rtploopback +application/rtx +application/samlassertion+xml +application/samlmetadata+xml +application/sbml+xml +application/scaip+xml +# scm: application/vnd.lotus-screencam +application/scim+json scim +application/scvp-cv-request scq +application/scvp-cv-response scs +application/scvp-vp-request spq +application/scvp-vp-response spp +application/sdp sdp +application/sep+xml +application/sep-exi +application/session-info +application/set-payment +application/set-payment-initiation +application/set-registration +application/set-registration-initiation +application/sgml +application/sgml-open-catalog soc +application/shf+xml shf +application/sieve siv sieve +application/simple-filter+xml cl +application/simple-message-summary +application/simpleSymbolContainer +application/slate +# application/smil obsoleted by application/smil+xml +application/smil+xml smil smi sml +application/smpte336m +application/soap+fastinfoset +application/soap+xml +application/sparql-query rq +application/sparql-results+xml srx +application/spirits-event+xml +application/sql sql +application/srgs gram +application/srgs+xml grxml +application/sru+xml sru +application/ssml+xml ssml +application/tamp-apex-update tau +application/tamp-apex-update-confirm auc +application/tamp-community-update tcu +application/tamp-community-update-confirm cuc +application/tamp-error ter +application/tamp-sequence-adjust tsa +application/tamp-sequence-adjust-confirm sac +# tsq: application/timestamp-query +application/tamp-status-query +# tsr: application/timestamp-reply +application/tamp-status-response +application/tamp-update tur +application/tamp-update-confirm tuc +application/tei+xml tei teiCorpus odd +application/thraud+xml tfi +application/timestamp-query tsq +application/timestamp-reply tsr +application/timestamped-data tsd +application/trig trig +application/ttml+xml ttml +application/tve-trigger +application/ulpfec +application/urc-grpsheet+xml gsheet +application/urc-ressheet+xml rsheet +application/urc-targetdesc+xml td +application/urc-uisocketdesc+xml uis +application/vcard+json +application/vcard+xml +application/vemmi +application/vnd.3gpp.access-transfer-events+xml +application/vnd.3gpp.bsf+xml +application/vnd.3gpp.mid-call+xml +application/vnd.3gpp.pic-bw-large plb +application/vnd.3gpp.pic-bw-small psb +application/vnd.3gpp.pic-bw-var pvb +application/vnd.3gpp-prose+xml +application/vnd.3gpp-prose-pc3ch+xml +# sms: application/vnd.3gpp2.sms +application/vnd.3gpp.sms +application/vnd.3gpp.sms+xml +application/vnd.3gpp.srvcc-ext+xml +application/vnd.3gpp.SRVCC-info+xml +application/vnd.3gpp.state-and-event-info+xml +application/vnd.3gpp.ussd+xml +application/vnd.3gpp2.bcmcsinfo+xml 
+application/vnd.3gpp2.sms sms +application/vnd.3gpp2.tcap tcap +application/vnd.3lightssoftware.imagescal imgcal +application/vnd.3M.Post-it-Notes pwn +application/vnd.accpac.simply.aso aso +application/vnd.accpac.simply.imp imp +application/vnd.acucobol acu +application/vnd.acucorp atc acutc +application/vnd.adobe.flash.movie swf +application/vnd.adobe.formscentral.fcdt fcdt +application/vnd.adobe.fxp fxp fxpl +application/vnd.adobe.partial-upload +application/vnd.adobe.xdp+xml xdp +application/vnd.adobe.xfdf xfdf +application/vnd.aether.imp +application/vnd.ah-barcode +application/vnd.ahead.space ahead +application/vnd.airzip.filesecure.azf azf +application/vnd.airzip.filesecure.azs azs +application/vnd.amazon.mobi8-ebook azw3 +application/vnd.americandynamics.acc acc +application/vnd.amiga.ami ami +application/vnd.amundsen.maze+xml +application/vnd.anki apkg +application/vnd.anser-web-certificate-issue-initiation cii +# Not in IANA listing, but is on FTP site? +application/vnd.anser-web-funds-transfer-initiation fti +# atx: audio/ATRAC-X +application/vnd.antix.game-component +application/vnd.apache.thrift.binary +application/vnd.apache.thrift.compact +application/vnd.apache.thrift.json +application/vnd.api+json +application/vnd.apothekende.reservation+json +application/vnd.apple.installer+xml dist distz pkg mpkg +# m3u: audio/x-mpegurl for now +application/vnd.apple.mpegurl m3u8 +# application/vnd.arastra.swi obsoleted by application/vnd.aristanetworks.swi +application/vnd.aristanetworks.swi swi +application/vnd.artsquare +application/vnd.astraea-software.iota iota +application/vnd.audiograph aep +application/vnd.autopackage package +application/vnd.avistar+xml +application/vnd.balsamiq.bmml+xml bmml +application/vnd.balsamiq.bmpr bmpr +application/vnd.bekitzur-stech+json +application/vnd.bint.med-content +application/vnd.biopax.rdf+xml +application/vnd.blueice.multipass mpm +application/vnd.bluetooth.ep.oob ep +application/vnd.bluetooth.le.oob le +application/vnd.bmi bmi +application/vnd.businessobjects rep +application/vnd.cab-jscript +application/vnd.canon-cpdl +application/vnd.canon-lips +application/vnd.capasystems-pg+json +application/vnd.cendio.thinlinc.clientconf tlclient +application/vnd.century-systems.tcp_stream +application/vnd.chemdraw+xml cdxml +application/vnd.chess-pgn pgn +application/vnd.chipnuts.karaoke-mmd mmd +application/vnd.cinderella cdy +application/vnd.cirpack.isdn-ext +application/vnd.citationstyles.style+xml csl +application/vnd.claymore cla +application/vnd.cloanto.rp9 rp9 +application/vnd.clonk.c4group c4g c4d c4f c4p c4u +application/vnd.cluetrust.cartomobile-config c11amc +application/vnd.cluetrust.cartomobile-config-pkg c11amz +application/vnd.coffeescript coffee +application/vnd.collection+json +application/vnd.collection.doc+json +application/vnd.collection.next+json +application/vnd.comicbook+zip cbz +# icc: application/vnd.iccprofile +application/vnd.commerce-battelle ica icf icd ic0 ic1 ic2 ic3 ic4 ic5 ic6 ic7 ic8 +application/vnd.commonspace csp cst +application/vnd.contact.cmsg cdbcmsg +application/vnd.coreos.ignition+json ign ignition +application/vnd.cosmocaller cmc +application/vnd.crick.clicker clkx +application/vnd.crick.clicker.keyboard clkk +application/vnd.crick.clicker.palette clkp +application/vnd.crick.clicker.template clkt +application/vnd.crick.clicker.wordbank clkw +application/vnd.criticaltools.wbs+xml wbs +application/vnd.ctc-posml pml +application/vnd.ctct.ws+xml +application/vnd.cups-pdf +application/vnd.cups-postscript 
+application/vnd.cups-ppd ppd +application/vnd.cups-raster +application/vnd.cups-raw +application/vnd.curl curl +application/vnd.cyan.dean.root+xml +application/vnd.cybank +application/vnd.d2l.coursepackage1p0+zip +application/vnd.dart dart +application/vnd.data-vision.rdz rdz +application/vnd.datapackage+json +application/vnd.dataresource+json +application/vnd.debian.binary-package deb udeb +application/vnd.dece.data uvf uvvf uvd uvvd +application/vnd.dece.ttml+xml uvt uvvt +application/vnd.dece.unspecified uvx uvvx +application/vnd.dece.zip uvz uvvz +application/vnd.denovo.fcselayout-link fe_launch +application/vnd.desmume.movie dsm +application/vnd.dir-bi.plate-dl-nosuffix +application/vnd.dm.delegation+xml +application/vnd.dna dna +application/vnd.document+json docjson +application/vnd.dolby.mobile.1 +application/vnd.dolby.mobile.2 +application/vnd.doremir.scorecloud-binary-document scld +application/vnd.dpgraph dpg mwc dpgraph +application/vnd.dreamfactory dfac +application/vnd.drive+json +application/vnd.dtg.local +application/vnd.dtg.local.flash fla +application/vnd.dtg.local.html +application/vnd.dvb.ait ait +# class: application/octet-stream +application/vnd.dvb.dvbj +application/vnd.dvb.esgcontainer +application/vnd.dvb.ipdcdftnotifaccess +application/vnd.dvb.ipdcesgaccess +application/vnd.dvb.ipdcesgaccess2 +application/vnd.dvb.ipdcesgpdd +application/vnd.dvb.ipdcroaming +application/vnd.dvb.iptv.alfec-base +application/vnd.dvb.iptv.alfec-enhancement +application/vnd.dvb.notif-aggregate-root+xml +application/vnd.dvb.notif-container+xml +application/vnd.dvb.notif-generic+xml +application/vnd.dvb.notif-ia-msglist+xml +application/vnd.dvb.notif-ia-registration-request+xml +application/vnd.dvb.notif-ia-registration-response+xml +application/vnd.dvb.notif-init+xml +# pfr: application/font-tdpfr +application/vnd.dvb.pfr +application/vnd.dvb.service svc +# dxr: application/x-director +application/vnd.dxr +application/vnd.dynageo geo +application/vnd.dzr dzr +application/vnd.easykaraoke.cdgdownload +application/vnd.ecdis-update +application/vnd.ecowin.chart mag +application/vnd.ecowin.filerequest +application/vnd.ecowin.fileupdate +application/vnd.ecowin.series +application/vnd.ecowin.seriesrequest +application/vnd.ecowin.seriesupdate +# img: application/octet-stream +application/vnd.efi-img +# iso: application/octet-stream +application/vnd.efi-iso +application/vnd.enliven nml +application/vnd.enphase.envoy +application/vnd.eprints.data+xml +application/vnd.epson.esf esf +application/vnd.epson.msf msf +application/vnd.epson.quickanime qam +application/vnd.epson.salt slt +application/vnd.epson.ssf ssf +application/vnd.ericsson.quickcall qcall qca +application/vnd.espass-espass+zip espass +application/vnd.eszigno3+xml es3 et3 +application/vnd.etsi.aoc+xml +application/vnd.etsi.asic-e+zip asice sce +# scs: application/scvp-cv-response +application/vnd.etsi.asic-s+zip asics +application/vnd.etsi.cug+xml +application/vnd.etsi.iptvcommand+xml +application/vnd.etsi.iptvdiscovery+xml +application/vnd.etsi.iptvprofile+xml +application/vnd.etsi.iptvsad-bc+xml +application/vnd.etsi.iptvsad-cod+xml +application/vnd.etsi.iptvsad-npvr+xml +application/vnd.etsi.iptvservice+xml +application/vnd.etsi.iptvsync+xml +application/vnd.etsi.iptvueprofile+xml +application/vnd.etsi.mcid+xml +application/vnd.etsi.mheg5 +application/vnd.etsi.overload-control-policy-dataset+xml +application/vnd.etsi.pstn+xml +application/vnd.etsi.sci+xml +application/vnd.etsi.simservs+xml +application/vnd.etsi.timestamp-token tst 
+application/vnd.etsi.tsl.der +application/vnd.etsi.tsl+xml +application/vnd.eudora.data +application/vnd.ezpix-album ez2 +application/vnd.ezpix-package ez3 +application/vnd.f-secure.mobile +application/vnd.fastcopy-disk-image dim +application/vnd.fdf fdf +application/vnd.fdsn.mseed msd mseed +application/vnd.fdsn.seed seed dataless +application/vnd.ffsns +application/vnd.filmit.zfc zfc +# all extensions: application/vnd.hbci +application/vnd.fints +application/vnd.firemonkeys.cloudcell +application/vnd.FloGraphIt gph +application/vnd.fluxtime.clip ftc +application/vnd.font-fontforge-sfd sfd +application/vnd.framemaker fm +application/vnd.frogans.fnc fnc +application/vnd.frogans.ltf ltf +application/vnd.fsc.weblaunch fsc +application/vnd.fujitsu.oasys oas +application/vnd.fujitsu.oasys2 oa2 +application/vnd.fujitsu.oasys3 oa3 +application/vnd.fujitsu.oasysgp fg5 +application/vnd.fujitsu.oasysprs bh2 +application/vnd.fujixerox.ART-EX +application/vnd.fujixerox.ART4 +application/vnd.fujixerox.ddd ddd +application/vnd.fujixerox.docuworks xdw +application/vnd.fujixerox.docuworks.binder xbd +application/vnd.fujixerox.docuworks.container xct +application/vnd.fujixerox.HBPL +application/vnd.fut-misnet +application/vnd.fuzzysheet fzs +application/vnd.genomatix.tuxedo txd +# application/vnd.geo+json obsoleted by application/geo+json +application/vnd.geocube+xml g3 g³ +application/vnd.geogebra.file ggb +application/vnd.geogebra.tool ggt +application/vnd.geometry-explorer gex gre +application/vnd.geonext gxt +application/vnd.geoplan g2w +application/vnd.geospace g3w +# gbr: application/rpki-ghostbusters +application/vnd.gerber +application/vnd.globalplatform.card-content-mgt +application/vnd.globalplatform.card-content-mgt-response +application/vnd.gmx gmx +application/vnd.google-earth.kml+xml kml +application/vnd.google-earth.kmz kmz +application/vnd.gov.sk.e-form+xml +application/vnd.gov.sk.e-form+zip +application/vnd.gov.sk.xmldatacontainer+xml +application/vnd.grafeq gqf gqs +application/vnd.gridmp +application/vnd.groove-account gac +application/vnd.groove-help ghf +application/vnd.groove-identity-message gim +application/vnd.groove-injector grv +application/vnd.groove-tool-message gtm +application/vnd.groove-tool-template tpl +application/vnd.groove-vcard vcg +application/vnd.hal+json +application/vnd.hal+xml hal +application/vnd.HandHeld-Entertainment+xml zmm +application/vnd.hbci hbci hbc kom upa pkd bpd +application/vnd.hc+json +# rep: application/vnd.businessobjects +application/vnd.hcl-bireports +application/vnd.hdt hdt +application/vnd.heroku+json +application/vnd.hhe.lesson-player les +application/vnd.hp-HPGL hpgl +application/vnd.hp-hpid hpi hpid +application/vnd.hp-hps hps +application/vnd.hp-jlyt jlt +application/vnd.hp-PCL pcl +application/vnd.hp-PCLXL +application/vnd.httphone +application/vnd.hydrostatix.sof-data sfd-hdstx +application/vnd.hyperdrive+json +application/vnd.hzn-3d-crossword x3d +application/vnd.ibm.afplinedata +application/vnd.ibm.electronic-media emm +application/vnd.ibm.MiniPay mpy +application/vnd.ibm.modcap list3820 listafp afp pseg3820 +application/vnd.ibm.rights-management irm +application/vnd.ibm.secure-container sc +application/vnd.iccprofile icc icm +application/vnd.ieee.1905 1905.1 +application/vnd.igloader igl +application/vnd.imagemeter.folder+zip imf +application/vnd.imagemeter.image+zip imi +application/vnd.immervision-ivp ivp +application/vnd.immervision-ivu ivu +application/vnd.ims.imsccv1p1 imscc +application/vnd.ims.imsccv1p2 
+application/vnd.ims.imsccv1p3 +application/vnd.ims.lis.v2.result+json +application/vnd.ims.lti.v2.toolconsumerprofile+json +application/vnd.ims.lti.v2.toolproxy.id+json +application/vnd.ims.lti.v2.toolproxy+json +application/vnd.ims.lti.v2.toolsettings+json +application/vnd.ims.lti.v2.toolsettings.simple+json +application/vnd.informedcontrol.rms+xml +# application/vnd.informix-visionary obsoleted by application/vnd.visionary +application/vnd.infotech.project +application/vnd.infotech.project+xml +application/vnd.innopath.wamp.notification +application/vnd.insors.igm igm +application/vnd.intercon.formnet xpw xpx +application/vnd.intergeo i2g +application/vnd.intertrust.digibox +application/vnd.intertrust.nncp +application/vnd.intu.qbo qbo +application/vnd.intu.qfx qfx +application/vnd.iptc.g2.catalogitem+xml +application/vnd.iptc.g2.conceptitem+xml +application/vnd.iptc.g2.knowledgeitem+xml +application/vnd.iptc.g2.newsitem+xml +application/vnd.iptc.g2.newsmessage+xml +application/vnd.iptc.g2.packageitem+xml +application/vnd.iptc.g2.planningitem+xml +application/vnd.ipunplugged.rcprofile rcprofile +application/vnd.irepository.package+xml irp +application/vnd.is-xpr xpr +application/vnd.isac.fcs fcs +application/vnd.jam jam +application/vnd.japannet-directory-service +application/vnd.japannet-jpnstore-wakeup +application/vnd.japannet-payment-wakeup +application/vnd.japannet-registration +application/vnd.japannet-registration-wakeup +application/vnd.japannet-setstore-wakeup +application/vnd.japannet-verification +application/vnd.japannet-verification-wakeup +application/vnd.jcp.javame.midlet-rms rms +application/vnd.jisp jisp +application/vnd.joost.joda-archive joda +application/vnd.jsk.isdn-ngn +application/vnd.kahootz ktz ktr +application/vnd.kde.karbon karbon +application/vnd.kde.kchart chrt +application/vnd.kde.kformula kfo +application/vnd.kde.kivio flw +application/vnd.kde.kontour kon +application/vnd.kde.kpresenter kpr kpt +application/vnd.kde.kspread ksp +application/vnd.kde.kword kwd kwt +application/vnd.kenameaapp htke +application/vnd.kidspiration kia +application/vnd.Kinar kne knp sdf +application/vnd.koan skp skd skm skt +application/vnd.kodak-descriptor sse +application/vnd.las.las+json lasjson +application/vnd.las.las+xml lasxml +application/vnd.liberty-request+xml +application/vnd.llamagraphics.life-balance.desktop lbd +application/vnd.llamagraphics.life-balance.exchange+xml lbe +application/vnd.lotus-1-2-3 123 wk4 wk3 wk1 +application/vnd.lotus-approach apr vew +application/vnd.lotus-freelance prz pre +application/vnd.lotus-notes nsf ntf ndl ns4 ns3 ns2 nsh nsg +application/vnd.lotus-organizer or3 or2 org +application/vnd.lotus-screencam scm +application/vnd.lotus-wordpro lwp sam +application/vnd.macports.portpkg portpkg +application/vnd.mapbox-vector-tile mvt +application/vnd.marlin.drm.actiontoken+xml +application/vnd.marlin.drm.conftoken+xml +application/vnd.marlin.drm.license+xml +application/vnd.marlin.drm.mdcf mdc +application/vnd.mason+json +application/vnd.maxmind.maxmind-db mmdb +application/vnd.mcd mcd +application/vnd.medcalcdata mc1 +application/vnd.mediastation.cdkey cdkey +application/vnd.meridian-slingshot +application/vnd.MFER mwf +application/vnd.mfmp mfm +application/vnd.micro+json +application/vnd.micrografx.flo flo +application/vnd.micrografx.igx igx +application/vnd.microsoft.portable-executable +application/vnd.microsoft.windows.thumbnail-cache +application/vnd.miele+json +application/vnd.mif mif +application/vnd.minisoft-hp3000-save 
+application/vnd.mitsubishi.misty-guard.trustweb +application/vnd.Mobius.DAF daf +application/vnd.Mobius.DIS dis +application/vnd.Mobius.MBK mbk +application/vnd.Mobius.MQY mqy +application/vnd.Mobius.MSL msl +application/vnd.Mobius.PLC plc +application/vnd.Mobius.TXF txf +application/vnd.mophun.application mpn +application/vnd.mophun.certificate mpc +application/vnd.motorola.flexsuite +application/vnd.motorola.flexsuite.adsi +application/vnd.motorola.flexsuite.fis +application/vnd.motorola.flexsuite.gotap +application/vnd.motorola.flexsuite.kmr +application/vnd.motorola.flexsuite.ttc +application/vnd.motorola.flexsuite.wem +application/vnd.motorola.iprm +application/vnd.mozilla.xul+xml xul +application/vnd.ms-3mfdocument 3mf +application/vnd.ms-artgalry cil +application/vnd.ms-asf asf +application/vnd.ms-cab-compressed cab +application/vnd.ms-excel xls xlm xla xlc xlt xlw +application/vnd.ms-excel.template.macroEnabled.12 xltm +application/vnd.ms-excel.addin.macroEnabled.12 xlam +application/vnd.ms-excel.sheet.binary.macroEnabled.12 xlsb +application/vnd.ms-excel.sheet.macroEnabled.12 xlsm +application/vnd.ms-fontobject eot +application/vnd.ms-htmlhelp chm +application/vnd.ms-ims ims +application/vnd.ms-lrm lrm +application/vnd.ms-office.activeX+xml +application/vnd.ms-officetheme thmx +application/vnd.ms-playready.initiator+xml +application/vnd.ms-powerpoint ppt pps pot +application/vnd.ms-powerpoint.addin.macroEnabled.12 ppam +application/vnd.ms-powerpoint.presentation.macroEnabled.12 pptm +application/vnd.ms-powerpoint.slide.macroEnabled.12 sldm +application/vnd.ms-powerpoint.slideshow.macroEnabled.12 ppsm +application/vnd.ms-powerpoint.template.macroEnabled.12 potm +application/vnd.ms-PrintDeviceCapabilities+xml +application/vnd.ms-PrintSchemaTicket+xml +application/vnd.ms-project mpp mpt +application/vnd.ms-tnef tnef tnf +application/vnd.ms-windows.devicepairing +application/vnd.ms-windows.nwprinting.oob +application/vnd.ms-windows.printerpairing +application/vnd.ms-windows.wsd.oob +application/vnd.ms-wmdrm.lic-chlg-req +application/vnd.ms-wmdrm.lic-resp +application/vnd.ms-wmdrm.meter-chlg-req +application/vnd.ms-wmdrm.meter-resp +application/vnd.ms-word.document.macroEnabled.12 docm +application/vnd.ms-word.template.macroEnabled.12 dotm +application/vnd.ms-works wcm wdb wks wps +application/vnd.ms-wpl wpl +application/vnd.ms-xpsdocument xps +application/vnd.msa-disk-image msa +application/vnd.mseq mseq +application/vnd.msign +application/vnd.multiad.creator crtr +application/vnd.multiad.creator.cif cif +application/vnd.music-niff +application/vnd.musician mus +application/vnd.muvee.style msty +application/vnd.mynfc taglet +application/vnd.ncd.control +application/vnd.ncd.reference +application/vnd.nearst.inv+json +application/vnd.nervana entity request bkm kcm +application/vnd.netfpx +# ntf: application/vnd.lotus-notes +application/vnd.nitf nitf +application/vnd.neurolanguage.nlu nlu +application/vnd.nintendo.nitro.rom nds +application/vnd.nintendo.snes.rom sfc smc +application/vnd.noblenet-directory nnd +application/vnd.noblenet-sealer nns +application/vnd.noblenet-web nnw +application/vnd.nokia.catalogs +application/vnd.nokia.conml+wbxml +application/vnd.nokia.conml+xml +application/vnd.nokia.iptv.config+xml +application/vnd.nokia.iSDS-radio-presets +application/vnd.nokia.landmark+wbxml +application/vnd.nokia.landmark+xml +application/vnd.nokia.landmarkcollection+xml +application/vnd.nokia.n-gage.ac+xml ac +application/vnd.nokia.n-gage.data ngdat 
+application/vnd.nokia.n-gage.symbian.install n-gage +application/vnd.nokia.ncd +application/vnd.nokia.pcd+wbxml +application/vnd.nokia.pcd+xml +application/vnd.nokia.radio-preset rpst +application/vnd.nokia.radio-presets rpss +application/vnd.novadigm.EDM edm +application/vnd.novadigm.EDX edx +application/vnd.novadigm.EXT ext +application/vnd.ntt-local.content-share +application/vnd.ntt-local.file-transfer +application/vnd.ntt-local.ogw_remote-access +application/vnd.ntt-local.sip-ta_remote +application/vnd.ntt-local.sip-ta_tcp_stream +application/vnd.oasis.opendocument.chart odc +application/vnd.oasis.opendocument.chart-template otc +application/vnd.oasis.opendocument.database odb +application/vnd.oasis.opendocument.formula odf +# otf: font/otf +application/vnd.oasis.opendocument.formula-template +application/vnd.oasis.opendocument.graphics odg +application/vnd.oasis.opendocument.graphics-template otg +application/vnd.oasis.opendocument.image odi +application/vnd.oasis.opendocument.image-template oti +application/vnd.oasis.opendocument.presentation odp +application/vnd.oasis.opendocument.presentation-template otp +application/vnd.oasis.opendocument.spreadsheet ods +application/vnd.oasis.opendocument.spreadsheet-template ots +application/vnd.oasis.opendocument.text odt +application/vnd.oasis.opendocument.text-master odm +application/vnd.oasis.opendocument.text-template ott +application/vnd.oasis.opendocument.text-web oth +application/vnd.obn +application/vnd.ocf+cbor +application/vnd.oftn.l10n+json +application/vnd.oipf.contentaccessdownload+xml +application/vnd.oipf.contentaccessstreaming+xml +application/vnd.oipf.cspg-hexbinary +application/vnd.oipf.dae.svg+xml +application/vnd.oipf.dae.xhtml+xml +application/vnd.oipf.mippvcontrolmessage+xml +application/vnd.oipf.pae.gem +application/vnd.oipf.spdiscovery+xml +application/vnd.oipf.spdlist+xml +application/vnd.oipf.ueprofile+xml +application/vnd.olpc-sugar xo +application/vnd.oma.bcast.associated-procedure-parameter+xml +application/vnd.oma.bcast.drm-trigger+xml +application/vnd.oma.bcast.imd+xml +application/vnd.oma.bcast.ltkm +application/vnd.oma.bcast.notification+xml +application/vnd.oma.bcast.provisioningtrigger +application/vnd.oma.bcast.sgboot +application/vnd.oma.bcast.sgdd+xml +application/vnd.oma.bcast.sgdu +application/vnd.oma.bcast.simple-symbol-container +application/vnd.oma.bcast.smartcard-trigger+xml +application/vnd.oma.bcast.sprov+xml +application/vnd.oma.bcast.stkm +application/vnd.oma.cab-address-book+xml +application/vnd.oma.cab-feature-handler+xml +application/vnd.oma.cab-pcc+xml +application/vnd.oma.cab-subs-invite+xml +application/vnd.oma.cab-user-prefs+xml +application/vnd.oma.dcd +application/vnd.oma.dcdc +application/vnd.oma.dd2+xml dd2 +application/vnd.oma.drm.risd+xml +application/vnd.oma.group-usage-list+xml +application/vnd.oma.lwm2m+json +application/vnd.oma.lwm2m+tlv +application/vnd.oma.pal+xml +application/vnd.oma.poc.detailed-progress-report+xml +application/vnd.oma.poc.final-report+xml +application/vnd.oma.poc.groups+xml +application/vnd.oma.poc.invocation-descriptor+xml +application/vnd.oma.poc.optimized-progress-report+xml +application/vnd.oma.push +application/vnd.oma.scidm.messages+xml +application/vnd.oma.xcap-directory+xml +application/vnd.oma-scws-config +application/vnd.oma-scws-http-request +application/vnd.oma-scws-http-response +application/vnd.omads-email+xml +application/vnd.omads-file+xml +application/vnd.omads-folder+xml +application/vnd.omaloc-supl-init +application/vnd.onepager tam 
+application/vnd.onepagertamp tamp +application/vnd.onepagertamx tamx +application/vnd.onepagertat tat +application/vnd.onepagertatp tatp +application/vnd.onepagertatx tatx +application/vnd.openblox.game+xml obgx +application/vnd.openblox.game-binary obg +application/vnd.openeye.oeb oeb +application/vnd.openofficeorg.extension oxt +application/vnd.openstreetmap.data+xml osm +application/vnd.openxmlformats-officedocument.custom-properties+xml +application/vnd.openxmlformats-officedocument.customXmlProperties+xml +application/vnd.openxmlformats-officedocument.drawing+xml +application/vnd.openxmlformats-officedocument.drawingml.chart+xml +application/vnd.openxmlformats-officedocument.drawingml.chartshapes+xml +application/vnd.openxmlformats-officedocument.drawingml.diagramColors+xml +application/vnd.openxmlformats-officedocument.drawingml.diagramData+xml +application/vnd.openxmlformats-officedocument.drawingml.diagramLayout+xml +application/vnd.openxmlformats-officedocument.drawingml.diagramStyle+xml +application/vnd.openxmlformats-officedocument.extended-properties+xml +application/vnd.openxmlformats-officedocument.presentationml.commentAuthors+xml +application/vnd.openxmlformats-officedocument.presentationml.comments+xml +application/vnd.openxmlformats-officedocument.presentationml.handoutMaster+xml +application/vnd.openxmlformats-officedocument.presentationml.notesMaster+xml +application/vnd.openxmlformats-officedocument.presentationml.notesSlide+xml +application/vnd.openxmlformats-officedocument.presentationml.presProps+xml +application/vnd.openxmlformats-officedocument.presentationml.presentation pptx +application/vnd.openxmlformats-officedocument.presentationml.presentation.main+xml +application/vnd.openxmlformats-officedocument.presentationml.slide sldx +application/vnd.openxmlformats-officedocument.presentationml.slide+xml +application/vnd.openxmlformats-officedocument.presentationml.slideLayout+xml +application/vnd.openxmlformats-officedocument.presentationml.slideMaster+xml +application/vnd.openxmlformats-officedocument.presentationml.slideUpdateInfo+xml +application/vnd.openxmlformats-officedocument.presentationml.slideshow ppsx +application/vnd.openxmlformats-officedocument.presentationml.slideshow.main+xml +application/vnd.openxmlformats-officedocument.presentationml.tableStyles+xml +application/vnd.openxmlformats-officedocument.presentationml.tags+xml +application/vnd.openxmlformats-officedocument.presentationml.template potx +application/vnd.openxmlformats-officedocument.presentationml.template.main+xml +application/vnd.openxmlformats-officedocument.presentationml.viewProps+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.calcChain+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.chartsheet+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.comments+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.connections+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.dialogsheet+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.externalLink+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.pivotCacheDefinition+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.pivotCacheRecords+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.pivotTable+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.queryTable+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.revisionHeaders+xml 
+application/vnd.openxmlformats-officedocument.spreadsheetml.revisionLog+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.sheet xlsx +application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.sheetMetadata+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.table+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.tableSingleCells+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.template xltx +application/vnd.openxmlformats-officedocument.spreadsheetml.template.main+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.userNames+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.volatileDependencies+xml +application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml +application/vnd.openxmlformats-officedocument.theme+xml +application/vnd.openxmlformats-officedocument.themeOverride+xml +application/vnd.openxmlformats-officedocument.vmlDrawing +application/vnd.openxmlformats-officedocument.wordprocessingml.comments+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.document docx +application/vnd.openxmlformats-officedocument.wordprocessingml.document.glossary+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.endnotes+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.fontTable+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.footer+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.footnotes+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.numbering+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.settings+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.styles+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.template dotx +application/vnd.openxmlformats-officedocument.wordprocessingml.template.main+xml +application/vnd.openxmlformats-officedocument.wordprocessingml.webSettings+xml +application/vnd.openxmlformats-package.core-properties+xml +application/vnd.openxmlformats-package.digital-signature-xmlsignature+xml +application/vnd.openxmlformats-package.relationships+xml +application/vnd.oracle.resource+json +application/vnd.orange.indata +application/vnd.osa.netdeploy ndc +application/vnd.osgeo.mapguide.package mgp +# jar: application/x-java-archive +application/vnd.osgi.bundle +application/vnd.osgi.dp dp +application/vnd.osgi.subsystem esa +application/vnd.otps.ct-kip+xml +application/vnd.oxli.countgraph oxlicg +application/vnd.pagerduty+json +application/vnd.palm prc pdb pqa oprc +application/vnd.panoply plp +application/vnd.paos+xml +application/vnd.pawaafile paw +application/vnd.pcos +application/vnd.pg.format str +application/vnd.pg.osasli ei6 +application/vnd.piaccess.application-license pil +application/vnd.picsel efif +application/vnd.pmi.widget wg +application/vnd.poc.group-advertisement+xml +application/vnd.pocketlearn plf +application/vnd.powerbuilder6 pbd +application/vnd.powerbuilder6-s +application/vnd.powerbuilder7 +application/vnd.powerbuilder7-s +application/vnd.powerbuilder75 +application/vnd.powerbuilder75-s +application/vnd.preminet preminet +application/vnd.previewsystems.box box vbox 
+application/vnd.proteus.magazine mgz +application/vnd.publishare-delta-tree qps +# pti: image/prs.pti +application/vnd.pvi.ptid1 ptid +application/vnd.pwg-multiplexed +application/vnd.pwg-xhtml-print+xml +application/vnd.qualcomm.brew-app-res bar +application/vnd.quarantainenet +application/vnd.Quark.QuarkXPress qxd qxt qwd qwt qxl qxb +application/vnd.quobject-quoxdocument quox quiz +application/vnd.radisys.moml+xml +application/vnd.radisys.msml-audit-conf+xml +application/vnd.radisys.msml-audit-conn+xml +application/vnd.radisys.msml-audit-dialog+xml +application/vnd.radisys.msml-audit-stream+xml +application/vnd.radisys.msml-audit+xml +application/vnd.radisys.msml-conf+xml +application/vnd.radisys.msml-dialog-base+xml +application/vnd.radisys.msml-dialog-fax-detect+xml +application/vnd.radisys.msml-dialog-fax-sendrecv+xml +application/vnd.radisys.msml-dialog-group+xml +application/vnd.radisys.msml-dialog-speech+xml +application/vnd.radisys.msml-dialog-transform+xml +application/vnd.radisys.msml-dialog+xml +application/vnd.radisys.msml+xml +application/vnd.rainstor.data tree +application/vnd.rapid +application/vnd.rar rar +application/vnd.realvnc.bed bed +application/vnd.recordare.musicxml mxl +application/vnd.recordare.musicxml+xml +application/vnd.RenLearn.rlprint +application/vnd.rig.cryptonote cryptonote +application/vnd.route66.link66+xml link66 +# gbr: application/rpki-ghostbusters +application/vnd.rs-274x +application/vnd.ruckus.download +application/vnd.s3sms +application/vnd.sailingtracker.track st +application/vnd.sbm.cid +application/vnd.sbm.mid2 +application/vnd.scribus scd sla slaz +application/vnd.sealed.3df s3df +application/vnd.sealed.csf scsf +application/vnd.sealed.doc sdoc sdo s1w +application/vnd.sealed.eml seml sem +application/vnd.sealed.mht smht smh +application/vnd.sealed.net +# spp: application/scvp-vp-response +application/vnd.sealed.ppt sppt s1p +application/vnd.sealed.tiff stif +application/vnd.sealed.xls sxls sxl s1e +# stm: audio/x-stm +application/vnd.sealedmedia.softseal.html stml s1h +application/vnd.sealedmedia.softseal.pdf spdf spd s1a +application/vnd.seemail see +application/vnd.sema sema +application/vnd.semd semd +application/vnd.semf semf +application/vnd.shana.informed.formdata ifm +application/vnd.shana.informed.formtemplate itp +application/vnd.shana.informed.interchange iif +application/vnd.shana.informed.package ipk +application/vnd.SimTech-MindMapper twd twds +application/vnd.siren+json +application/vnd.smaf mmf +application/vnd.smart.notebook notebook +application/vnd.smart.teacher teacher +application/vnd.software602.filler.form+xml fo +application/vnd.software602.filler.form-xml-zip zfo +application/vnd.solent.sdkm+xml sdkm sdkd +application/vnd.spotfire.dxp dxp +application/vnd.spotfire.sfs sfs +application/vnd.sss-cod +application/vnd.sss-dtf +application/vnd.sss-ntf +application/vnd.stepmania.package smzip +application/vnd.stepmania.stepchart sm +application/vnd.street-stream +application/vnd.sun.wadl+xml wadl +application/vnd.sus-calendar sus susp +application/vnd.svd +application/vnd.swiftview-ics +application/vnd.syncml+xml xsm +application/vnd.syncml.dm+wbxml bdm +application/vnd.syncml.dm+xml xdm +application/vnd.syncml.dm.notification +application/vnd.syncml.dmddf+wbxml +application/vnd.syncml.dmddf+xml ddf +application/vnd.syncml.dmtnds+wbxml +application/vnd.syncml.dmtnds+xml +application/vnd.syncml.ds.notification +application/vnd.tableschema+json +application/vnd.tao.intent-module-archive tao +application/vnd.tcpdump.pcap pcap 
cap dmp +application/vnd.theqvd qvd +application/vnd.tmd.mediaflex.api+xml +application/vnd.tml vfr viaframe +application/vnd.tmobile-livetv tmo +application/vnd.tri.onesource +application/vnd.trid.tpt tpt +application/vnd.triscape.mxs mxs +application/vnd.trueapp tra +application/vnd.truedoc +# cab: application/vnd.ms-cab-compressed +application/vnd.ubisoft.webplayer +application/vnd.ufdl ufdl ufd frm +application/vnd.uiq.theme utz +application/vnd.umajin umj +application/vnd.unity unityweb +application/vnd.uoml+xml uoml uo +application/vnd.uplanet.alert +application/vnd.uplanet.alert-wbxml +application/vnd.uplanet.bearer-choice +application/vnd.uplanet.bearer-choice-wbxml +application/vnd.uplanet.cacheop +application/vnd.uplanet.cacheop-wbxml +application/vnd.uplanet.channel +application/vnd.uplanet.channel-wbxml +application/vnd.uplanet.list +application/vnd.uplanet.list-wbxml +application/vnd.uplanet.listcmd +application/vnd.uplanet.listcmd-wbxml +application/vnd.uplanet.signal +application/vnd.uri-map urim urimap +application/vnd.valve.source.material vmt +application/vnd.vcx vcx +# sxi: application/vnd.sun.xml.impress +application/vnd.vd-study mxi study-inter model-inter +# mcd: application/vnd.mcd +application/vnd.vectorworks vwx +application/vnd.vel+json +application/vnd.verimatrix.vcas +application/vnd.vidsoft.vidconference vsc +application/vnd.visio vsd vst vsw vss +application/vnd.visionary vis +# vsc: application/vnd.vidsoft.vidconference +application/vnd.vividence.scriptfile +application/vnd.vsf vsf +application/vnd.wap.sic sic +application/vnd.wap.slc slc +application/vnd.wap.wbxml wbxml +application/vnd.wap.wmlc wmlc +application/vnd.wap.wmlscriptc wmlsc +application/vnd.webturbo wtb +application/vnd.wfa.p2p p2p +application/vnd.wfa.wsc wsc +application/vnd.windows.devicepairing +application/vnd.wmc wmc +application/vnd.wmf.bootstrap +# nb: application/mathematica for now +application/vnd.wolfram.mathematica +application/vnd.wolfram.mathematica.package m +application/vnd.wolfram.player nbp +application/vnd.wordperfect wpd +application/vnd.wqd wqd +application/vnd.wrq-hp3000-labelled +application/vnd.wt.stf stf +application/vnd.wv.csp+xml +application/vnd.wv.csp+wbxml wv +application/vnd.wv.ssp+xml +application/vnd.xacml+json +application/vnd.xara xar +application/vnd.xfdl xfdl xfd +application/vnd.xfdl.webform +application/vnd.xmi+xml +application/vnd.xmpie.cpkg cpkg +application/vnd.xmpie.dpkg dpkg +# dpkg: application/vnd.xmpie.dpkg +application/vnd.xmpie.plan +application/vnd.xmpie.ppkg ppkg +application/vnd.xmpie.xlim xlim +application/vnd.yamaha.hv-dic hvd +application/vnd.yamaha.hv-script hvs +application/vnd.yamaha.hv-voice hvp +application/vnd.yamaha.openscoreformat osf +application/vnd.yamaha.openscoreformat.osfpvg+xml +application/vnd.yamaha.remote-setup +application/vnd.yamaha.smaf-audio saf +application/vnd.yamaha.smaf-phrase spf +application/vnd.yamaha.through-ngn +application/vnd.yamaha.tunnel-udpencap +application/vnd.yaoweme yme +application/vnd.yellowriver-custom-menu cmp +application/vnd.zul zir zirz +application/vnd.zzazz.deck+xml zaz +application/voicexml+xml vxml +application/vq-rtcp-xr +application/watcherinfo+xml wif +application/whoispp-query +application/whoispp-response +application/widget wgt +application/wita +application/wordperfect5.1 +application/wsdl+xml wsdl +application/wspolicy+xml wspolicy +# yes, this *is* IANA registered despite of x- +application/x-www-form-urlencoded +application/x400-bp +application/xacml+xml +application/xcap-att+xml 
xav +application/xcap-caps+xml xca +application/xcap-diff+xml xdf +application/xcap-el+xml xel +application/xcap-error+xml xer +application/xcap-ns+xml xns +application/xcon-conference-info-diff+xml +application/xcon-conference-info+xml +application/xenc+xml +application/xhtml+xml xhtml xhtm xht +# xml, xsd, rng: text/xml +application/xml +# mod: audio/x-mod +application/xml-dtd dtd +# ent: text/xml-external-parsed-entity +application/xml-external-parsed-entity +application/xml-patch+xml +application/xmpp+xml +application/xop+xml xop +application/xslt+xml xsl xslt +application/xv+xml mxml xhvml xvml xvm +application/yang yang +application/yang-data+json +application/yang-data+xml +application/yang-patch+json +application/yang-patch+xml +application/yin+xml yin +application/zip zip +application/zlib +audio/1d-interleaved-parityfec +audio/32kadpcm 726 +# 3gp, 3gpp: video/3gpp +audio/3gpp +# 3g2, 3gpp2: video/3gpp2 +audio/3gpp2 +audio/ac3 ac3 +audio/AMR amr +audio/AMR-WB awb +audio/amr-wb+ +audio/aptx +audio/asc acn +# aa3, omg: audio/ATRAC3 +audio/ATRAC-ADVANCED-LOSSLESS aal +# aa3, omg: audio/ATRAC3 +audio/ATRAC-X atx +audio/ATRAC3 at3 aa3 omg +audio/basic au snd +audio/BV16 +audio/BV32 +audio/clearmode +audio/CN +audio/DAT12 +audio/dls dls +audio/dsr-es201108 +audio/dsr-es202050 +audio/dsr-es202211 +audio/dsr-es202212 +audio/DV +audio/DVI4 +audio/eac3 +audio/encaprtp +audio/EVRC evc +# qcp: audio/qcelp +audio/EVRC-QCP +audio/EVRC0 +audio/EVRC1 +audio/EVRCB evb +audio/EVRCB0 +audio/EVRCB1 +audio/EVRCNW enw +audio/EVRCNW0 +audio/EVRCNW1 +audio/EVRCWB evw +audio/EVRCWB0 +audio/EVRCWB1 +audio/EVS +audio/example +audio/fwdred +audio/G711-0 +audio/G719 +audio/G722 +audio/G7221 +audio/G723 +audio/G726-16 +audio/G726-24 +audio/G726-32 +audio/G726-40 +audio/G728 +audio/G729 +audio/G7291 +audio/G729D +audio/G729E +audio/GSM +audio/GSM-EFR +audio/GSM-HR-08 +audio/iLBC lbc +audio/ip-mr_v2.5 +# wav: audio/x-wav +audio/L16 l16 +audio/L20 +audio/L24 +audio/L8 +audio/LPC +audio/MELP +audio/MELP600 +audio/MELP1200 +audio/MELP2400 +audio/mobile-xmf mxmf +# mp4, mpg4: video/mp4, see RFC 4337 +audio/mp4 m4a +audio/MP4A-LATM +audio/MPA +audio/mpa-robust +audio/mpeg mp3 mpga mp1 mp2 +audio/mpeg4-generic +audio/ogg oga ogg opus spx +audio/opus +audio/parityfec +audio/PCMA +audio/PCMA-WB +audio/PCMU +audio/PCMU-WB +audio/prs.sid sid psid +audio/qcelp qcp +audio/raptorfec +audio/RED +audio/rtp-enc-aescm128 +audio/rtp-midi +audio/rtploopback +audio/rtx +audio/SMV smv +# qcp: audio/qcelp, see RFC 3625 +audio/SMV-QCP +audio/SMV0 +# mid: audio/midi +audio/sp-midi +audio/speex +audio/t140c +audio/t38 +audio/telephone-event +audio/tone +audio/UEMCLIP +audio/ulpfec +audio/VDVI +audio/VMR-WB +audio/vnd.3gpp.iufp +audio/vnd.4SB +audio/vnd.audikoz koz +audio/vnd.CELP +audio/vnd.cisco.nse +audio/vnd.cmles.radio-events +audio/vnd.cns.anp1 +audio/vnd.cns.inf1 +audio/vnd.dece.audio uva uvva +audio/vnd.digital-winds eol +audio/vnd.dlna.adts +audio/vnd.dolby.heaac.1 +audio/vnd.dolby.heaac.2 +audio/vnd.dolby.mlp mlp +audio/vnd.dolby.mps +audio/vnd.dolby.pl2 +audio/vnd.dolby.pl2x +audio/vnd.dolby.pl2z +audio/vnd.dolby.pulse.1 +audio/vnd.dra +# wav: audio/x-wav, cpt: application/mac-compactpro +audio/vnd.dts dts +audio/vnd.dts.hd dtshd +# dvb: video/vnd.dvb.file +audio/vnd.dvb.file +audio/vnd.everad.plj plj +# rm: audio/x-pn-realaudio +audio/vnd.hns.audio +audio/vnd.lucent.voice lvp +audio/vnd.ms-playready.media.pya pya +# mxmf: audio/mobile-xmf +audio/vnd.nokia.mobile-xmf +audio/vnd.nortel.vbk vbk +audio/vnd.nuera.ecelp4800 
ecelp4800 +audio/vnd.nuera.ecelp7470 ecelp7470 +audio/vnd.nuera.ecelp9600 ecelp9600 +audio/vnd.octel.sbc +# audio/vnd.qcelp deprecated in favour of audio/qcelp +audio/vnd.rhetorex.32kadpcm +audio/vnd.rip rip +audio/vnd.sealedmedia.softseal.mpeg smp3 smp s1m +audio/vnd.vmx.cvsd +audio/vorbis +audio/vorbis-config +font/collection ttc +font/otf otf +font/sfnt +font/ttf ttf +font/woff woff +font/woff2 woff2 +image/bmp bmp dib +image/cgm cgm +image/dicom-rle drle +image/emf emf +image/example +image/fits fits fit fts +image/g3fax +image/gif gif +image/ief ief +image/jls jls +image/jp2 jp2 jpg2 +image/jpeg jpg jpeg jpe jfif +image/jpm jpm jpgm +image/jpx jpx jpf +image/ktx ktx +image/naplps +image/png png +image/prs.btif btif btf +image/prs.pti pti +image/pwg-raster +image/svg+xml svg svgz +image/t38 t38 +image/tiff tiff tif +image/tiff-fx tfx +image/vnd.adobe.photoshop psd +image/vnd.airzip.accelerator.azv azv +image/vnd.cns.inf2 +image/vnd.dece.graphic uvi uvvi uvg uvvg +image/vnd.djvu djvu djv +# sub: text/vnd.dvb.subtitle +image/vnd.dvb.subtitle +image/vnd.dwg dwg +image/vnd.dxf dxf +image/vnd.fastbidsheet fbs +image/vnd.fpx fpx +image/vnd.fst fst +image/vnd.fujixerox.edmics-mmr mmr +image/vnd.fujixerox.edmics-rlc rlc +image/vnd.globalgraphics.pgb pgb +image/vnd.microsoft.icon ico +image/vnd.mix +image/vnd.mozilla.apng apng +image/vnd.ms-modi mdi +image/vnd.net-fpx +image/vnd.radiance hdr rgbe xyze +image/vnd.sealed.png spng spn s1n +image/vnd.sealedmedia.softseal.gif sgif sgi s1g +image/vnd.sealedmedia.softseal.jpg sjpg sjp s1j +image/vnd.svf +image/vnd.tencent.tap tap +image/vnd.valve.source.texture vtf +image/vnd.wap.wbmp wbmp +image/vnd.xiff xif +image/vnd.zbrush.pcx pcx +image/wmf wmf +message/CPIM +message/delivery-status +message/disposition-notification +message/example +message/external-body +message/feedback-report +message/global u8msg +message/global-delivery-status u8dsn +message/global-disposition-notification u8mdn +message/global-headers u8hdr +message/http +# cl: application/simple-filter+xml +message/imdn+xml +# message/news obsoleted by message/rfc822 +message/partial +message/rfc822 eml mail art +message/s-http +message/sip +message/sipfrag +message/tracking-status +message/vnd.si.simp +# wsc: application/vnd.wfa.wsc +message/vnd.wfa.wsc +model/example +model/gltf+json gltf +model/iges igs iges +model/mesh msh mesh silo +model/vnd.collada+xml dae +model/vnd.dwf dwf +# 3dml, 3dm: text/vnd.in3d.3dml +model/vnd.flatland.3dml +model/vnd.gdl gdl gsm win dor lmp rsm msm ism +model/vnd.gs-gdl +model/vnd.gtw gtw +model/vnd.moml+xml moml +model/vnd.mts mts +model/vnd.opengex ogex +model/vnd.parasolid.transmit.binary x_b xmt_bin +model/vnd.parasolid.transmit.text x_t xmt_txt +model/vnd.rosette.annotated-data-model +model/vnd.valve.source.compiled-map bsp +model/vnd.vtu vtu +model/vrml wrl vrml +# x3db: model/x3d+xml +model/x3d+fastinfoset +# x3d: application/vnd.hzn-3d-crossword +model/x3d+xml x3db +model/x3d-vrml x3dv x3dvz +multipart/alternative +multipart/appledouble +multipart/byteranges +multipart/digest +multipart/encrypted +multipart/form-data +multipart/header-set +multipart/mixed +multipart/parallel +multipart/related +multipart/report +multipart/signed +multipart/vnd.bint.med-plus bmed +multipart/voice-message vpm +multipart/x-mixed-replace +text/1d-interleaved-parityfec +text/cache-manifest appcache manifest +text/calendar ics ifb +text/css css +text/csv csv +text/csv-schema csvs +text/directory +text/dns soa zone +text/encaprtp +# text/ecmascript obsoleted by 
application/ecmascript +text/enriched +text/example +text/fwdred +text/grammar-ref-list +text/html html htm +# text/javascript obsoleted by application/javascript +text/jcr-cnd cnd +text/markdown markdown md +text/mizar miz +text/n3 n3 +text/parameters +text/parityfec +text/plain txt asc text pm el c h cc hh cxx hxx f90 conf log +text/provenance-notation provn +text/prs.fallenstein.rst rst +text/prs.lines.tag tag dsc +text/prs.prop.logic +text/raptorfec +text/RED +text/rfc822-headers +text/richtext rtx +# rtf: application/rtf +text/rtf +text/rtp-enc-aescm128 +text/rtploopback +text/rtx +text/sgml sgml sgm +text/strings +text/t140 +text/tab-separated-values tsv +text/troff t tr roff +text/turtle ttl +text/ulpfec +text/uri-list uris uri +text/vcard vcf vcard +text/vnd.a a +text/vnd.abc abc +text/vnd.ascii-art ascii +# curl: application/vnd.curl +text/vnd.curl +text/vnd.debian.copyright copyright +text/vnd.DMClientScript dms +text/vnd.dvb.subtitle sub +text/vnd.esmertec.theme-descriptor jtd +text/vnd.fly fly +text/vnd.fmi.flexstor flx +text/vnd.graphviz gv dot +text/vnd.in3d.3dml 3dml 3dm +text/vnd.in3d.spot spot spo +text/vnd.IPTC.NewsML +text/vnd.IPTC.NITF +text/vnd.latex-z +text/vnd.motorola.reflex +text/vnd.ms-mediapackage mpf +text/vnd.net2phone.commcenter.command ccc +text/vnd.radisys.msml-basic-layout +text/vnd.si.uricatalogue uric +text/vnd.sun.j2me.app-descriptor jad +text/vnd.trolltech.linguist ts +text/vnd.wap.si si +text/vnd.wap.sl sl +text/vnd.wap.wml wml +text/vnd.wap.wmlscript wmls +text/xml xml xsd rng +text/xml-external-parsed-entity ent +video/1d-interleaved-parityfec +video/3gpp 3gp 3gpp +video/3gpp2 3g2 3gpp2 +video/3gpp-tt +video/BMPEG +video/BT656 +video/CelB +video/DV +video/encaprtp +video/example +video/H261 +video/H263 +video/H263-1998 +video/H263-2000 +video/H264 +video/H264-RCDO +video/H264-SVC +video/H265 +video/iso.segment m4s +video/JPEG +video/jpeg2000 +video/mj2 mj2 mjp2 +video/MP1S +video/MP2P +video/MP2T +video/mp4 mp4 mpg4 m4v +video/MP4V-ES +video/mpeg mpeg mpg mpe m1v m2v +video/mpeg4-generic +video/MPV +video/nv +video/ogg ogv +video/parityfec +video/pointer +video/quicktime mov qt +video/raptorfec +video/raw +video/rtp-enc-aescm128 +video/rtploopback +video/rtx +video/SMPTE292M +video/ulpfec +video/vc1 +video/vnd.CCTV +video/vnd.dece.hd uvh uvvh +video/vnd.dece.mobile uvm uvvm +video/vnd.dece.mp4 uvu uvvu +video/vnd.dece.pd uvp uvvp +video/vnd.dece.sd uvs uvvs +video/vnd.dece.video uvv uvvv +video/vnd.directv.mpeg +video/vnd.directv.mpeg-tts +video/vnd.dlna.mpeg-tts +video/vnd.dvb.file dvb +video/vnd.fvt fvt +# rm: audio/x-pn-realaudio +video/vnd.hns.video +video/vnd.iptvforum.1dparityfec-1010 +video/vnd.iptvforum.1dparityfec-2005 +video/vnd.iptvforum.2dparityfec-1010 +video/vnd.iptvforum.2dparityfec-2005 +video/vnd.iptvforum.ttsavc +video/vnd.iptvforum.ttsmpeg2 +video/vnd.motorola.video +video/vnd.motorola.videop +video/vnd.mpegurl mxu m4u +video/vnd.ms-playready.media.pyv pyv +video/vnd.nokia.interleaved-multimedia nim +video/vnd.nokia.videovoip +# mp4: video/mp4 +video/vnd.objectvideo +video/vnd.radgamettools.bink bik bk2 +video/vnd.radgamettools.smacker smk +video/vnd.sealed.mpeg1 smpg s11 +# smpg: video/vnd.sealed.mpeg1 +video/vnd.sealed.mpeg4 s14 +video/vnd.sealed.swf sswf ssw +video/vnd.sealedmedia.softseal.mov smov smo s1q +# uvu, uvvu: video/vnd.dece.mp4 +video/vnd.uvvu.mp4 +video/vnd.vivo viv +video/VP8 + +# Non-IANA types + +application/mac-compactpro cpt +application/metalink+xml metalink +application/owl+xml owx +application/rss+xml rss 
+application/vnd.android.package-archive apk +application/vnd.oma.dd+xml dd +application/vnd.oma.drm.content dcf +# odf: application/vnd.oasis.opendocument.formula +application/vnd.oma.drm.dcf o4a o4v +application/vnd.oma.drm.message dm +application/vnd.oma.drm.rights+wbxml drc +application/vnd.oma.drm.rights+xml dr +application/vnd.sun.xml.calc sxc +application/vnd.sun.xml.calc.template stc +application/vnd.sun.xml.draw sxd +application/vnd.sun.xml.draw.template std +application/vnd.sun.xml.impress sxi +application/vnd.sun.xml.impress.template sti +application/vnd.sun.xml.math sxm +application/vnd.sun.xml.writer sxw +application/vnd.sun.xml.writer.global sxg +application/vnd.sun.xml.writer.template stw +application/vnd.symbian.install sis +application/vnd.wap.mms-message mms +application/x-annodex anx +application/x-bcpio bcpio +application/x-bittorrent torrent +application/x-bzip2 bz2 +application/x-cdlink vcd +application/x-chrome-extension crx +application/x-cpio cpio +application/x-csh csh +application/x-director dcr dir dxr +application/x-dvi dvi +application/x-futuresplash spl +application/x-gtar gtar +application/x-hdf hdf +application/x-java-archive jar +application/x-java-jnlp-file jnlp +application/x-java-pack200 pack +application/x-killustrator kil +application/x-latex latex +application/x-netcdf nc cdf +application/x-perl pl +application/x-rpm rpm +application/x-sh sh +application/x-shar shar +application/x-stuffit sit +application/x-sv4cpio sv4cpio +application/x-sv4crc sv4crc +application/x-tar tar +application/x-tcl tcl +application/x-tex tex +application/x-texinfo texinfo texi +application/x-troff-man man 1 2 3 4 5 6 7 8 +application/x-troff-me me +application/x-troff-ms ms +application/x-ustar ustar +application/x-wais-source src +application/x-xpinstall xpi +application/x-xspf+xml xspf +application/x-xz xz +audio/midi mid midi kar +audio/x-aiff aif aiff aifc +audio/x-annodex axa +audio/x-flac flac +audio/x-matroska mka +audio/x-mod mod ult uni m15 mtm 669 med +audio/x-mpegurl m3u +audio/x-ms-wax wax +audio/x-ms-wma wma +audio/x-pn-realaudio ram rm +audio/x-realaudio ra +audio/x-s3m s3m +audio/x-stm stm +audio/x-wav wav +chemical/x-xyz xyz +image/webp webp +image/x-cmu-raster ras +image/x-portable-anymap pnm +image/x-portable-bitmap pbm +image/x-portable-graymap pgm +image/x-portable-pixmap ppm +image/x-rgb rgb +image/x-targa tga +image/x-xbitmap xbm +image/x-xpixmap xpm +image/x-xwindowdump xwd +text/html-sandboxed sandboxed +text/x-pod pod +text/x-setext etx +video/webm webm +video/x-annodex axv +video/x-flv flv +video/x-javafx fxm +video/x-matroska mkv +video/x-matroska-3d mk3d +video/x-ms-asf asx +video/x-ms-wm wm +video/x-ms-wmv wmv +video/x-ms-wmx wmx +video/x-ms-wvx wvx +video/x-msvideo avi +video/x-sgi-movie movie +x-conference/x-cooltalk ice +x-epoc/x-sisx-app sisx diff --git a/cmd/swarm/mru.go b/cmd/swarm/mru.go index 6176b6d6c..cc7f634cb 100644 --- a/cmd/swarm/mru.go +++ b/cmd/swarm/mru.go @@ -19,10 +19,11 @@ package main import ( "fmt" - "strconv" "strings" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/cmd/utils" swarm "github.com/ethereum/go-ethereum/swarm/api/client" @@ -34,62 +35,38 @@ func NewGenericSigner(ctx *cli.Context) mru.Signer { return mru.NewGenericSigner(getPrivKey(ctx)) } +func getTopic(ctx *cli.Context) (topic mru.Topic) { + var name = ctx.String(SwarmResourceNameFlag.Name) + var relatedTopic = 
ctx.String(SwarmResourceTopicFlag.Name) + var relatedTopicBytes []byte + var err error + + if relatedTopic != "" { + relatedTopicBytes, err = hexutil.Decode(relatedTopic) + if err != nil { + utils.Fatalf("Error parsing topic: %s", err) + } + } + + topic, err = mru.NewTopic(name, relatedTopicBytes) + if err != nil { + utils.Fatalf("Error parsing topic: %s", err) + } + return topic +} + // swarm resource create <frequency> [--name <name>] [--data <0x Hexdata> [--multihash=false]] // swarm resource update <Manifest Address or ENS domain> <0x Hexdata> [--multihash=false] // swarm resource info <Manifest Address or ENS domain> func resourceCreate(ctx *cli.Context) { - args := ctx.Args() - var ( - bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") - client = swarm.NewClient(bzzapi) - multihash = ctx.Bool(SwarmResourceMultihashFlag.Name) - initialData = ctx.String(SwarmResourceDataOnCreateFlag.Name) - name = ctx.String(SwarmResourceNameFlag.Name) + bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") + client = swarm.NewClient(bzzapi) ) - if len(args) < 1 { - fmt.Println("Incorrect number of arguments") - cli.ShowCommandHelpAndExit(ctx, "create", 1) - return - } - signer := NewGenericSigner(ctx) - frequency, err := strconv.ParseUint(args[0], 10, 64) - if err != nil { - fmt.Printf("Frequency formatting error: %s\n", err.Error()) - cli.ShowCommandHelpAndExit(ctx, "create", 1) - return - } - - metadata := mru.ResourceMetadata{ - Name: name, - Frequency: frequency, - Owner: signer.Address(), - } - - var newResourceRequest *mru.Request - if initialData != "" { - initialDataBytes, err := hexutil.Decode(initialData) - if err != nil { - fmt.Printf("Error parsing data: %s\n", err.Error()) - cli.ShowCommandHelpAndExit(ctx, "create", 1) - return - } - newResourceRequest, err = mru.NewCreateUpdateRequest(&metadata) - if err != nil { - utils.Fatalf("Error creating new resource request: %s", err) - } - newResourceRequest.SetData(initialDataBytes, multihash) - if err = newResourceRequest.Sign(signer); err != nil { - utils.Fatalf("Error signing resource update: %s", err.Error()) - } - } else { - newResourceRequest, err = mru.NewCreateRequest(&metadata) - if err != nil { - utils.Fatalf("Error creating new resource request: %s", err) - } - } + newResourceRequest := mru.NewFirstRequest(getTopic(ctx)) + newResourceRequest.View.User = resourceGetUser(ctx) manifestAddress, err := client.CreateResource(newResourceRequest) if err != nil { @@ -104,32 +81,43 @@ func resourceUpdate(ctx *cli.Context) { args := ctx.Args() var ( - bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") - client = swarm.NewClient(bzzapi) - multihash = ctx.Bool(SwarmResourceMultihashFlag.Name) + bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") + client = swarm.NewClient(bzzapi) + manifestAddressOrDomain = ctx.String(SwarmResourceManifestFlag.Name) ) - if len(args) < 2 { + if len(args) < 1 { fmt.Println("Incorrect number of arguments") cli.ShowCommandHelpAndExit(ctx, "update", 1) return } + signer := NewGenericSigner(ctx) - manifestAddressOrDomain := args[0] - data, err := hexutil.Decode(args[1]) + + data, err := hexutil.Decode(args[0]) if err != nil { utils.Fatalf("Error parsing data: %s", err.Error()) return } + var updateRequest *mru.Request + var query *mru.Query + + if manifestAddressOrDomain == "" { + query = new(mru.Query) + query.User = signer.Address() + query.Topic = getTopic(ctx) + + } + // Retrieve resource status and metadata out of the manifest - updateRequest, err 
:= client.GetResourceMetadata(manifestAddressOrDomain) + updateRequest, err = client.GetResourceMetadata(query, manifestAddressOrDomain) if err != nil { utils.Fatalf("Error retrieving resource status: %s", err.Error()) } // set the new data - updateRequest.SetData(data, multihash) + updateRequest.SetData(data) // sign update if err = updateRequest.Sign(signer); err != nil { @@ -146,17 +134,19 @@ func resourceUpdate(ctx *cli.Context) { func resourceInfo(ctx *cli.Context) { var ( - bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") - client = swarm.NewClient(bzzapi) + bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") + client = swarm.NewClient(bzzapi) + manifestAddressOrDomain = ctx.String(SwarmResourceManifestFlag.Name) ) - args := ctx.Args() - if len(args) < 1 { - fmt.Println("Incorrect number of arguments.") - cli.ShowCommandHelpAndExit(ctx, "info", 1) - return + + var query *mru.Query + if manifestAddressOrDomain == "" { + query = new(mru.Query) + query.Topic = getTopic(ctx) + query.User = resourceGetUser(ctx) } - manifestAddressOrDomain := args[0] - metadata, err := client.GetResourceMetadata(manifestAddressOrDomain) + + metadata, err := client.GetResourceMetadata(query, manifestAddressOrDomain) if err != nil { utils.Fatalf("Error retrieving resource metadata: %s", err.Error()) return @@ -167,3 +157,16 @@ func resourceInfo(ctx *cli.Context) { } fmt.Println(string(encodedMetadata)) } + +func resourceGetUser(ctx *cli.Context) common.Address { + var user = ctx.String(SwarmResourceUserFlag.Name) + if user != "" { + return common.HexToAddress(user) + } + pk := getPrivKey(ctx) + if pk == nil { + utils.Fatalf("Cannot read private key. Must specify --user or --bzzaccount") + } + return crypto.PubkeyToAddress(pk.PublicKey) + +} diff --git a/cmd/swarm/mru_test.go b/cmd/swarm/mru_test.go new file mode 100644 index 000000000..142cf9cfd --- /dev/null +++ b/cmd/swarm/mru_test.go @@ -0,0 +1,182 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>. 
+ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "testing" + + "github.com/ethereum/go-ethereum/swarm/api" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" + "github.com/ethereum/go-ethereum/swarm/testutil" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/swarm/storage/mru" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/log" + swarm "github.com/ethereum/go-ethereum/swarm/api/client" + swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http" +) + +func TestCLIResourceUpdate(t *testing.T) { + + srv := testutil.NewTestSwarmServer(t, func(api *api.API) testutil.TestServer { + return swarmhttp.NewServer(api, "") + }, nil) + log.Info("starting 1 node cluster") + defer srv.Close() + + // create a private key file for signing + pkfile, err := ioutil.TempFile("", "swarm-test") + if err != nil { + t.Fatal(err) + } + defer pkfile.Close() + defer os.Remove(pkfile.Name()) + + privkeyHex := "0000000000000000000000000000000000000000000000000000000000001979" + privKey, _ := crypto.HexToECDSA(privkeyHex) + address := crypto.PubkeyToAddress(privKey.PublicKey) + + // save the private key to a file + _, err = io.WriteString(pkfile, privkeyHex) + if err != nil { + t.Fatal(err) + } + + // compose a topic. We'll be doing quotes about Miguel de Cervantes + var topic mru.Topic + subject := []byte("Miguel de Cervantes") + copy(topic[:], subject[:]) + name := "quotes" + + // prepare some data for the update + data := []byte("En boca cerrada no entran moscas") + hexData := hexutil.Encode(data) + + flags := []string{ + "--bzzapi", srv.URL, + "--bzzaccount", pkfile.Name(), + "resource", "update", + "--topic", topic.Hex(), + "--name", name, + hexData} + + // create an update and expect an exit without errors + log.Info(fmt.Sprintf("updating a resource with 'swarm resource update'")) + cmd := runSwarm(t, flags...) + cmd.ExpectExit() + + // now try to get the update using the client + client := swarm.NewClient(srv.URL) + if err != nil { + t.Fatal(err) + } + + // build the same topic as before, this time + // we use NewTopic to create a topic automatically. + topic, err = mru.NewTopic(name, subject) + if err != nil { + t.Fatal(err) + } + + // View configures whose updates we will be looking up. + view := mru.View{ + Topic: topic, + User: address, + } + + // Build a query to get the latest update + query := mru.NewQueryLatest(&view, lookup.NoClue) + + // retrieve content! + reader, err := client.GetResource(query, "") + if err != nil { + t.Fatal(err) + } + + retrieved, err := ioutil.ReadAll(reader) + if err != nil { + t.Fatal(err) + } + + // check we retrieved the sent information + if !bytes.Equal(data, retrieved) { + t.Fatalf("Received %s, expected %s", retrieved, data) + } + + // Now retrieve info for the next update + flags = []string{ + "--bzzapi", srv.URL, + "resource", "info", + "--topic", topic.Hex(), + "--user", address.Hex(), + } + + log.Info(fmt.Sprintf("getting resource info with 'swarm resource info'")) + cmd = runSwarm(t, flags...) 
+ _, matches := cmd.ExpectRegexp(`.*`) // regex hack to extract stdout + cmd.ExpectExit() + + // verify we can deserialize the result as a valid JSON + var request mru.Request + err = json.Unmarshal([]byte(matches[0]), &request) + if err != nil { + t.Fatal(err) + } + + // make sure the retrieved view is the same + if request.View != view { + t.Fatalf("Expected view to be: %s, got %s", view, request.View) + } + + // test publishing a manifest + flags = []string{ + "--bzzapi", srv.URL, + "--bzzaccount", pkfile.Name(), + "resource", "create", + "--topic", topic.Hex(), + } + + log.Info(fmt.Sprintf("Publishing manifest with 'swarm resource create'")) + cmd = runSwarm(t, flags...) + _, matches = cmd.ExpectRegexp(`[a-f\d]{64}`) // regex hack to extract stdout + cmd.ExpectExit() + + manifestAddress := matches[0] // read the received resource manifest + + // now attempt to lookup the latest update using a manifest instead + reader, err = client.GetResource(nil, manifestAddress) + if err != nil { + t.Fatal(err) + } + + retrieved, err = ioutil.ReadAll(reader) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(data, retrieved) { + t.Fatalf("Received %s, expected %s", retrieved, data) + } +} diff --git a/cmd/swarm/run_test.go b/cmd/swarm/run_test.go index e2f54c8ff..4be0495f8 100644 --- a/cmd/swarm/run_test.go +++ b/cmd/swarm/run_test.go @@ -242,7 +242,7 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode { "--bzzaccount", bzzaccount, "--bzznetworkid", "321", "--bzzport", httpPort, - "--verbosity", "3", + "--verbosity", fmt.Sprint(*loglevel), ) node.Cmd.InputLine(testPassphrase) defer func() { @@ -318,7 +318,7 @@ func newTestNode(t *testing.T, dir string) *testNode { "--bzzaccount", account.Address.String(), "--bzznetworkid", "321", "--bzzport", httpPort, - "--verbosity", "3", + "--verbosity", fmt.Sprint(*loglevel), ) node.Cmd.InputLine(testPassphrase) defer func() { diff --git a/cmd/swarm/upload.go b/cmd/swarm/upload.go index f76cb1b98..2225127cf 100644 --- a/cmd/swarm/upload.go +++ b/cmd/swarm/upload.go @@ -22,16 +22,15 @@ import ( "fmt" "io" "io/ioutil" - "mime" - "net/http" "os" "os/user" "path" "path/filepath" "strings" - "github.com/ethereum/go-ethereum/cmd/utils" swarm "github.com/ethereum/go-ethereum/swarm/api/client" + + "github.com/ethereum/go-ethereum/cmd/utils" "gopkg.in/urfave/cli.v1" ) @@ -118,10 +117,9 @@ func upload(ctx *cli.Context) { return "", fmt.Errorf("error opening file: %s", err) } defer f.Close() - if mimeType == "" { - mimeType = detectMimeType(file) + if mimeType != "" { + f.ContentType = mimeType } - f.ContentType = mimeType return client.Upload(f, "", toEncrypt) } } @@ -161,19 +159,3 @@ func homeDir() string { } return "" } - -func detectMimeType(file string) string { - if ext := filepath.Ext(file); ext != "" { - return mime.TypeByExtension(ext) - } - f, err := os.Open(file) - if err != nil { - return "" - } - defer f.Close() - buf := make([]byte, 512) - if n, _ := f.Read(buf); n > 0 { - return http.DetectContentType(buf) - } - return "" -} diff --git a/cmd/swarm/upload_test.go b/cmd/swarm/upload_test.go index c3199dadc..84205031b 100644 --- a/cmd/swarm/upload_test.go +++ b/cmd/swarm/upload_test.go @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>. 
+// +build !windows + package main import ( @@ -32,7 +34,7 @@ import ( "github.com/ethereum/go-ethereum/log" swarm "github.com/ethereum/go-ethereum/swarm/api/client" - colorable "github.com/mattn/go-colorable" + "github.com/mattn/go-colorable" ) var loglevel = flag.Int("loglevel", 3, "verbosity of logs") diff --git a/contracts/ens/ens.go b/contracts/ens/ens.go index 75d9d0e4b..b7448c471 100644 --- a/contracts/ens/ens.go +++ b/contracts/ens/ens.go @@ -151,6 +151,38 @@ func (self *ENS) Resolve(name string) (common.Hash, error) { return common.BytesToHash(ret[:]), nil } +// Addr is a non-transactional call that returns the address associated with a name. +func (self *ENS) Addr(name string) (common.Address, error) { + node := EnsNode(name) + + resolver, err := self.getResolver(node) + if err != nil { + return common.Address{}, err + } + + ret, err := resolver.Addr(node) + if err != nil { + return common.Address{}, err + } + + return common.BytesToAddress(ret[:]), nil +} + +// SetAddress sets the address associated with a name. Only works if the caller +// owns the name, and the associated resolver implements a `setAddress` function. +func (self *ENS) SetAddr(name string, addr common.Address) (*types.Transaction, error) { + node := EnsNode(name) + + resolver, err := self.getResolver(node) + if err != nil { + return nil, err + } + + opts := self.TransactOpts + opts.GasLimit = 200000 + return resolver.Contract.SetAddr(&opts, node, addr) +} + // Register registers a new domain name for the caller, making them the owner of the new name. // Only works if the registrar for the parent domain implements the FIFS registrar protocol. func (self *ENS) Register(name string) (*types.Transaction, error) { diff --git a/contracts/ens/ens_test.go b/contracts/ens/ens_test.go index 411b04197..cd64fbf15 100644 --- a/contracts/ens/ens_test.go +++ b/contracts/ens/ens_test.go @@ -22,16 +22,18 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/contracts/ens/contract" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/crypto" ) var ( - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - name = "my name on ENS" - hash = crypto.Keccak256Hash([]byte("my content")) - addr = crypto.PubkeyToAddress(key.PublicKey) + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + name = "my name on ENS" + hash = crypto.Keccak256Hash([]byte("my content")) + addr = crypto.PubkeyToAddress(key.PublicKey) + testAddr = common.HexToAddress("0x1234123412341234123412341234123412341234") ) func TestENS(t *testing.T) { @@ -74,4 +76,19 @@ func TestENS(t *testing.T) { if vhost != hash { t.Fatalf("resolve error, expected %v, got %v", hash.Hex(), vhost.Hex()) } + + // set the address for the name + if _, err = ens.SetAddr(name, testAddr); err != nil { + t.Fatalf("can't set address: %v", err) + } + contractBackend.Commit() + + // Try to resolve the name to an address + recoveredAddr, err := ens.Addr(name) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if vhost != hash { + t.Fatalf("resolve error, expected %v, got %v", testAddr.Hex(), recoveredAddr.Hex()) + } } diff --git a/core/blockchain.go b/core/blockchain.go index fe961e0c4..f4a818f4c 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -55,6 +55,7 @@ var ( const ( bodyCacheLimit = 256 blockCacheLimit = 256 + 
receiptsCacheLimit = 32 maxFutureBlocks = 256 maxTimeFutureBlocks = 30 badBlockLimit = 10 @@ -111,11 +112,12 @@ type BlockChain struct { currentBlock atomic.Value // Current head of the block chain currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!) - stateCache state.Database // State database to reuse between imports (contains state cache) - bodyCache *lru.Cache // Cache for the most recent block bodies - bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format - blockCache *lru.Cache // Cache for the most recent entire blocks - futureBlocks *lru.Cache // future blocks are blocks added for later processing + stateCache state.Database // State database to reuse between imports (contains state cache) + bodyCache *lru.Cache // Cache for the most recent block bodies + bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format + receiptsCache *lru.Cache // Cache for the most recent receipts per block + blockCache *lru.Cache // Cache for the most recent entire blocks + futureBlocks *lru.Cache // future blocks are blocks added for later processing quit chan struct{} // blockchain quit channel running int32 // running must be called atomically @@ -144,6 +146,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par } bodyCache, _ := lru.New(bodyCacheLimit) bodyRLPCache, _ := lru.New(bodyCacheLimit) + receiptsCache, _ := lru.New(receiptsCacheLimit) blockCache, _ := lru.New(blockCacheLimit) futureBlocks, _ := lru.New(maxFutureBlocks) badBlocks, _ := lru.New(badBlockLimit) @@ -158,6 +161,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par shouldPreserve: shouldPreserve, bodyCache: bodyCache, bodyRLPCache: bodyRLPCache, + receiptsCache: receiptsCache, blockCache: blockCache, futureBlocks: futureBlocks, engine: engine, @@ -280,6 +284,7 @@ func (bc *BlockChain) SetHead(head uint64) error { // Clear out any stale content from the caches bc.bodyCache.Purge() bc.bodyRLPCache.Purge() + bc.receiptsCache.Purge() bc.blockCache.Purge() bc.futureBlocks.Purge() @@ -603,11 +608,18 @@ func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block { // GetReceiptsByHash retrieves the receipts for all transactions in a given block. func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts { + if receipts, ok := bc.receiptsCache.Get(hash); ok { + return receipts.(types.Receipts) + } + number := rawdb.ReadHeaderNumber(bc.db, hash) if number == nil { return nil } - return rawdb.ReadReceipts(bc.db, hash, *number) + + receipts := rawdb.ReadReceipts(bc.db, hash, *number) + bc.receiptsCache.Add(hash, receipts) + return receipts } // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. 
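The core/blockchain.go hunk above puts a small LRU cache in front of the receipts database read and purges it whenever the head is rewound. Below is a minimal, self-contained sketch of that read-through pattern, assuming github.com/hashicorp/golang-lru; `ReceiptStore`, `Hash`, and `Receipts` are placeholder names for illustration, not the real `BlockChain` fields or core/types definitions.

```go
// Sketch of the read-through receipts cache pattern shown in the hunk above.
// Illustrative only: ReceiptStore, Hash and Receipts are placeholder types,
// not the actual go-ethereum structures.
package receiptcache

import (
	lru "github.com/hashicorp/golang-lru"
)

type Hash [32]byte
type Receipts []string // stand-in for types.Receipts

type ReceiptStore struct {
	cache      *lru.Cache
	readFromDB func(Hash) Receipts // fallback lookup, e.g. a database read
}

func NewReceiptStore(readFromDB func(Hash) Receipts) *ReceiptStore {
	cache, _ := lru.New(32) // mirrors receiptsCacheLimit in the hunk above
	return &ReceiptStore{cache: cache, readFromDB: readFromDB}
}

// Get serves receipts from the cache when possible and falls back to the
// database on a miss, populating the cache for the next caller — the same
// shape as the new GetReceiptsByHash.
func (s *ReceiptStore) Get(hash Hash) Receipts {
	if cached, ok := s.cache.Get(hash); ok {
		return cached.(Receipts)
	}
	receipts := s.readFromDB(hash)
	s.cache.Add(hash, receipts)
	return receipts
}

// Reset drops all cached entries, analogous to the Purge call in SetHead,
// so stale receipts are not served after the chain head is rewound.
func (s *ReceiptStore) Reset() {
	s.cache.Purge()
}
```

Purging on head rewinds is the important half of the pattern: without it, a cache keyed only by block hash could keep serving receipts for blocks that are no longer part of the canonical chain state the node exposes.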
diff --git a/core/types/transaction.go b/core/types/transaction.go index 9c6e77be9..7b53cac2c 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -153,16 +153,21 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error { if err := dec.UnmarshalJSON(input); err != nil { return err } - var V byte - if isProtectedV(dec.V) { - chainID := deriveChainId(dec.V).Uint64() - V = byte(dec.V.Uint64() - 35 - 2*chainID) - } else { - V = byte(dec.V.Uint64() - 27) - } - if !crypto.ValidateSignatureValues(V, dec.R, dec.S, false) { - return ErrInvalidSig + + withSignature := dec.V.Sign() != 0 || dec.R.Sign() != 0 || dec.S.Sign() != 0 + if withSignature { + var V byte + if isProtectedV(dec.V) { + chainID := deriveChainId(dec.V).Uint64() + V = byte(dec.V.Uint64() - 35 - 2*chainID) + } else { + V = byte(dec.V.Uint64() - 27) + } + if !crypto.ValidateSignatureValues(V, dec.R, dec.S, false) { + return ErrInvalidSig + } } + *tx = Transaction{data: dec} return nil } diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index c195d23a3..1997755dc 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -227,13 +227,13 @@ func recoverPlain(sighash common.Hash, R, S, Vb *big.Int, homestead bool) (commo if !crypto.ValidateSignatureValues(V, R, S, homestead) { return common.Address{}, ErrInvalidSig } - // encode the snature in uncompressed format + // encode the signature in uncompressed format r, s := R.Bytes(), S.Bytes() sig := make([]byte, 65) copy(sig[32-len(r):32], r) copy(sig[64-len(s):64], s) sig[64] = V - // recover the public key from the snature + // recover the public key from the signature pub, err := crypto.Ecrecover(sighash[:], sig) if err != nil { return common.Address{}, err diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index b390f45c6..f38d8e717 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -185,6 +185,7 @@ func TestTransactionJSON(t *testing.T) { } signer := NewEIP155Signer(common.Big1) + transactions := make([]*Transaction, 0, 50) for i := uint64(0); i < 25; i++ { var tx *Transaction switch i % 2 { @@ -193,20 +194,25 @@ func TestTransactionJSON(t *testing.T) { case 1: tx = NewContractCreation(i, common.Big0, 1, common.Big2, []byte("abcdef")) } + transactions = append(transactions, tx) - tx, err := SignTx(tx, signer, key) + signedTx, err := SignTx(tx, signer, key) if err != nil { t.Fatalf("could not sign transaction: %v", err) } + transactions = append(transactions, signedTx) + } + + for _, tx := range transactions { data, err := json.Marshal(tx) if err != nil { - t.Errorf("json.Marshal failed: %v", err) + t.Fatalf("json.Marshal failed: %v", err) } var parsedTx *Transaction if err := json.Unmarshal(data, &parsedTx); err != nil { - t.Errorf("json.Unmarshal failed: %v", err) + t.Fatalf("json.Unmarshal failed: %v", err) } // compare nonce, price, gaslimit, recipient, amount, payload, V, R, S diff --git a/eth/api.go b/eth/api.go index 708f75a78..3ec3afb81 100644 --- a/eth/api.go +++ b/eth/api.go @@ -67,6 +67,15 @@ func (api *PublicEthereumAPI) Hashrate() hexutil.Uint64 { return hexutil.Uint64(api.e.Miner().HashRate()) } +// ChainId is the EIP-155 replay-protection chain id for the current ethereum chain config. 
+func (api *PublicEthereumAPI) ChainId() hexutil.Uint64 { + chainID := new(big.Int) + if config := api.e.chainConfig; config.IsEIP155(api.e.blockchain.CurrentBlock().Number()) { + chainID = config.ChainID + } + return (hexutil.Uint64)(chainID.Uint64()) +} + // PublicMinerAPI provides an API to control the miner. // It offers only methods that operate on data that pose no security risk when it is publicly accessible. type PublicMinerAPI struct { diff --git a/eth/api_backend.go b/eth/api_backend.go index 03f6012d7..8748d444f 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -25,7 +25,6 @@ import ( "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/bloombits" - "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" @@ -107,18 +106,11 @@ func (b *EthAPIBackend) GetBlock(ctx context.Context, hash common.Hash) (*types. } func (b *EthAPIBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { - if number := rawdb.ReadHeaderNumber(b.eth.chainDb, hash); number != nil { - return rawdb.ReadReceipts(b.eth.chainDb, hash, *number), nil - } - return nil, nil + return b.eth.blockchain.GetReceiptsByHash(hash), nil } func (b *EthAPIBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) { - number := rawdb.ReadHeaderNumber(b.eth.chainDb, hash) - if number == nil { - return nil, nil - } - receipts := rawdb.ReadReceipts(b.eth.chainDb, hash, *number) + receipts := b.eth.blockchain.GetReceiptsByHash(hash) if receipts == nil { return nil, nil } diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index fbde9c6ca..805195034 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -658,8 +658,10 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err continue } // Otherwise check if we already know the header or not - if (d.mode == FullSync && d.blockchain.HasBlock(headers[i].Hash(), headers[i].Number.Uint64())) || (d.mode != FullSync && d.lightchain.HasHeader(headers[i].Hash(), headers[i].Number.Uint64())) { - number, hash = headers[i].Number.Uint64(), headers[i].Hash() + h := headers[i].Hash() + n := headers[i].Number.Uint64() + if (d.mode == FullSync && d.blockchain.HasBlock(h, n)) || (d.mode != FullSync && d.lightchain.HasHeader(h, n)) { + number, hash = n, h // If every header is known, even future ones, the peer straight out lied about its head if number > height && i == limit-1 { @@ -723,11 +725,13 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err arrived = true // Modify the search interval based on the response - if (d.mode == FullSync && !d.blockchain.HasBlock(headers[0].Hash(), headers[0].Number.Uint64())) || (d.mode != FullSync && !d.lightchain.HasHeader(headers[0].Hash(), headers[0].Number.Uint64())) { + h := headers[0].Hash() + n := headers[0].Number.Uint64() + if (d.mode == FullSync && !d.blockchain.HasBlock(h, n)) || (d.mode != FullSync && !d.lightchain.HasHeader(h, n)) { end = check break } - header := d.lightchain.GetHeaderByHash(headers[0].Hash()) // Independent of sync mode, header surely exists + header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists if header.Number.Uint64() != check { p.log.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check) return 
0, errBadPeer diff --git a/eth/handler.go b/eth/handler.go index 551781ef0..1f62d820e 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -49,6 +49,9 @@ const ( // txChanSize is the size of channel listening to NewTxsEvent. // The number is referenced from the size of tx pool. txChanSize = 4096 + + // minimim number of peers to broadcast new blocks to + minBroadcastPeers = 4 ) var ( @@ -705,7 +708,14 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) { return } // Send the block to a subset of our peers - transfer := peers[:int(math.Sqrt(float64(len(peers))))] + transferLen := int(math.Sqrt(float64(len(peers)))) + if transferLen < minBroadcastPeers { + transferLen = minBroadcastPeers + } + if transferLen > len(peers) { + transferLen = len(peers) + } + transfer := peers[:transferLen] for _, peer := range transfer { peer.AsyncSendNewBlock(block, td) } diff --git a/eth/handler_test.go b/eth/handler_test.go index 0885a0448..7811cd480 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -17,6 +17,7 @@ package eth import ( + "fmt" "math" "math/big" "math/rand" @@ -466,14 +467,17 @@ func testDAOChallenge(t *testing.T, localForked, remoteForked bool, timeout bool } // Create a DAO aware protocol manager var ( - evmux = new(event.TypeMux) - pow = ethash.NewFaker() - db = ethdb.NewMemDatabase() - config = ¶ms.ChainConfig{DAOForkBlock: big.NewInt(1), DAOForkSupport: localForked} - gspec = &core.Genesis{Config: config} - genesis = gspec.MustCommit(db) - blockchain, _ = core.NewBlockChain(db, nil, config, pow, vm.Config{}, nil) + evmux = new(event.TypeMux) + pow = ethash.NewFaker() + db = ethdb.NewMemDatabase() + config = ¶ms.ChainConfig{DAOForkBlock: big.NewInt(1), DAOForkSupport: localForked} + gspec = &core.Genesis{Config: config} + genesis = gspec.MustCommit(db) ) + blockchain, err := core.NewBlockChain(db, nil, config, pow, vm.Config{}, nil) + if err != nil { + t.Fatalf("failed to create new blockchain: %v", err) + } pm, err := NewProtocolManager(config, downloader.FullSync, DefaultConfig.NetworkId, evmux, new(testTxPool), pow, blockchain, db) if err != nil { t.Fatalf("failed to start test protocol manager: %v", err) @@ -520,3 +524,90 @@ func testDAOChallenge(t *testing.T, localForked, remoteForked bool, timeout bool } } } + +func TestBroadcastBlock(t *testing.T) { + var tests = []struct { + totalPeers int + broadcastExpected int + }{ + {1, 1}, + {2, 2}, + {3, 3}, + {4, 4}, + {5, 4}, + {9, 4}, + {12, 4}, + {16, 4}, + {26, 5}, + {100, 10}, + } + for _, test := range tests { + testBroadcastBlock(t, test.totalPeers, test.broadcastExpected) + } +} + +func testBroadcastBlock(t *testing.T, totalPeers, broadcastExpected int) { + var ( + evmux = new(event.TypeMux) + pow = ethash.NewFaker() + db = ethdb.NewMemDatabase() + config = ¶ms.ChainConfig{} + gspec = &core.Genesis{Config: config} + genesis = gspec.MustCommit(db) + ) + blockchain, err := core.NewBlockChain(db, nil, config, pow, vm.Config{}, nil) + if err != nil { + t.Fatalf("failed to create new blockchain: %v", err) + } + pm, err := NewProtocolManager(config, downloader.FullSync, DefaultConfig.NetworkId, evmux, new(testTxPool), pow, blockchain, db) + if err != nil { + t.Fatalf("failed to start test protocol manager: %v", err) + } + pm.Start(1000) + defer pm.Stop() + var peers []*testPeer + for i := 0; i < totalPeers; i++ { + peer, _ := newTestPeer(fmt.Sprintf("peer %d", i), eth63, pm, true) + defer peer.close() + peers = append(peers, peer) + } + chain, _ := core.GenerateChain(gspec.Config, genesis, 
ethash.NewFaker(), db, 1, func(i int, gen *core.BlockGen) {}) + pm.BroadcastBlock(chain[0], true /*propagate*/) + + errCh := make(chan error, totalPeers) + doneCh := make(chan struct{}, totalPeers) + for _, peer := range peers { + go func(p *testPeer) { + if err := p2p.ExpectMsg(p.app, NewBlockMsg, &newBlockData{Block: chain[0], TD: big.NewInt(131136)}); err != nil { + errCh <- err + } else { + doneCh <- struct{}{} + } + }(peer) + } + timeoutCh := time.NewTimer(time.Millisecond * 100).C + var receivedCount int +outer: + for { + select { + case err = <-errCh: + break outer + case <-doneCh: + receivedCount++ + if receivedCount == totalPeers { + break outer + } + case <-timeoutCh: + break outer + } + } + for _, peer := range peers { + peer.app.Close() + } + if err != nil { + t.Errorf("error matching block by peer: %v", err) + } + if receivedCount != broadcastExpected { + t.Errorf("block broadcast to %d peers, expected %d", receivedCount, broadcastExpected) + } +} diff --git a/internal/debug/flags.go b/internal/debug/flags.go index 7d7eba98a..46c8fe9f8 100644 --- a/internal/debug/flags.go +++ b/internal/debug/flags.go @@ -25,11 +25,11 @@ import ( "runtime" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/log/term" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics/exp" "github.com/fjl/memsize/memsizeui" colorable "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" "gopkg.in/urfave/cli.v1" ) @@ -101,7 +101,7 @@ var ( ) func init() { - usecolor := term.IsTty(os.Stderr.Fd()) && os.Getenv("TERM") != "dumb" + usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb" output := io.Writer(os.Stderr) if usecolor { output = colorable.NewColorableStderr() diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index f4eb47a12..bf4b7808f 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -434,6 +434,11 @@ web3._extend({ property: 'eth', methods: [ new web3._extend.Method({ + name: 'chainId', + call: 'eth_chainId', + params: 0 + }), + new web3._extend.Method({ name: 'sign', call: 'eth_sign', params: 2, diff --git a/les/fetcher.go b/les/fetcher.go index cc539c42b..f0d3b188d 100644 --- a/les/fetcher.go +++ b/les/fetcher.go @@ -32,8 +32,9 @@ import ( ) const ( - blockDelayTimeout = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others - maxNodeCount = 20 // maximum number of fetcherTreeNode entries remembered for each peer + blockDelayTimeout = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others + maxNodeCount = 20 // maximum number of fetcherTreeNode entries remembered for each peer + serverStateAvailable = 100 // number of recent blocks where state availability is assumed ) // lightFetcher implements retrieval of newly announced headers. 
It also provides a peerHasBlock function for the @@ -215,8 +216,8 @@ func (f *lightFetcher) syncLoop() { // registerPeer adds a new peer to the fetcher's peer set func (f *lightFetcher) registerPeer(p *peer) { p.lock.Lock() - p.hasBlock = func(hash common.Hash, number uint64) bool { - return f.peerHasBlock(p, hash, number) + p.hasBlock = func(hash common.Hash, number uint64, hasState bool) bool { + return f.peerHasBlock(p, hash, number, hasState) } p.lock.Unlock() @@ -344,21 +345,27 @@ func (f *lightFetcher) announce(p *peer, head *announceData) { // peerHasBlock returns true if we can assume the peer knows the given block // based on its announcements -func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64) bool { +func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64, hasState bool) bool { f.lock.Lock() defer f.lock.Unlock() + fp := f.peers[p] + if fp == nil || fp.root == nil { + return false + } + + if hasState { + if fp.lastAnnounced == nil || fp.lastAnnounced.number > number+serverStateAvailable { + return false + } + } + if f.syncing { // always return true when syncing // false positives are acceptable, a more sophisticated condition can be implemented later return true } - fp := f.peers[p] - if fp == nil || fp.root == nil { - return false - } - if number >= fp.root.number { // it is recent enough that if it is known, is should be in the peer's block tree return fp.nodeByHash[hash] != nil diff --git a/les/odr_requests.go b/les/odr_requests.go index 77b1b6d0c..0f2e5dd9e 100644 --- a/les/odr_requests.go +++ b/les/odr_requests.go @@ -84,7 +84,7 @@ func (r *BlockRequest) GetCost(peer *peer) uint64 { // CanSend tells if a certain peer is suitable for serving the given request func (r *BlockRequest) CanSend(peer *peer) bool { - return peer.HasBlock(r.Hash, r.Number) + return peer.HasBlock(r.Hash, r.Number, false) } // Request sends an ODR request to the LES network (implementation of LesOdrRequest) @@ -140,7 +140,7 @@ func (r *ReceiptsRequest) GetCost(peer *peer) uint64 { // CanSend tells if a certain peer is suitable for serving the given request func (r *ReceiptsRequest) CanSend(peer *peer) bool { - return peer.HasBlock(r.Hash, r.Number) + return peer.HasBlock(r.Hash, r.Number, false) } // Request sends an ODR request to the LES network (implementation of LesOdrRequest) @@ -202,7 +202,7 @@ func (r *TrieRequest) GetCost(peer *peer) uint64 { // CanSend tells if a certain peer is suitable for serving the given request func (r *TrieRequest) CanSend(peer *peer) bool { - return peer.HasBlock(r.Id.BlockHash, r.Id.BlockNumber) + return peer.HasBlock(r.Id.BlockHash, r.Id.BlockNumber, true) } // Request sends an ODR request to the LES network (implementation of LesOdrRequest) @@ -272,7 +272,7 @@ func (r *CodeRequest) GetCost(peer *peer) uint64 { // CanSend tells if a certain peer is suitable for serving the given request func (r *CodeRequest) CanSend(peer *peer) bool { - return peer.HasBlock(r.Id.BlockHash, r.Id.BlockNumber) + return peer.HasBlock(r.Id.BlockHash, r.Id.BlockNumber, true) } // Request sends an ODR request to the LES network (implementation of LesOdrRequest) diff --git a/les/odr_test.go b/les/odr_test.go index e6458adf5..ac81fbcf0 100644 --- a/les/odr_test.go +++ b/les/odr_test.go @@ -194,7 +194,7 @@ func testOdr(t *testing.T, protocol int, expFail uint64, fn odrTestFn) { client.peers.Register(client.rPeer) time.Sleep(time.Millisecond * 10) // ensure that all peerSetNotify callbacks are executed client.peers.lock.Lock() - 
client.rPeer.hasBlock = func(common.Hash, uint64) bool { return true } + client.rPeer.hasBlock = func(common.Hash, uint64, bool) bool { return true } client.peers.lock.Unlock() test(5) // still expect all retrievals to pass, now data should be cached locally diff --git a/les/peer.go b/les/peer.go index 1f343847e..678384f0e 100644 --- a/les/peer.go +++ b/les/peer.go @@ -67,7 +67,7 @@ type peer struct { sendQueue *execQueue poolEntry *poolEntry - hasBlock func(common.Hash, uint64) bool + hasBlock func(common.Hash, uint64, bool) bool responseErrors int fcClient *flowcontrol.ClientNode // nil if the peer is server only @@ -171,11 +171,11 @@ func (p *peer) GetRequestCost(msgcode uint64, amount int) uint64 { } // HasBlock checks if the peer has a given block -func (p *peer) HasBlock(hash common.Hash, number uint64) bool { +func (p *peer) HasBlock(hash common.Hash, number uint64, hasState bool) bool { p.lock.RLock() hasBlock := p.hasBlock p.lock.RUnlock() - return hasBlock != nil && hasBlock(hash, number) + return hasBlock != nil && hasBlock(hash, number, hasState) } // SendAnnounce announces the availability of a number of blocks through diff --git a/les/request_test.go b/les/request_test.go index f02c2a3d7..c9c185198 100644 --- a/les/request_test.go +++ b/les/request_test.go @@ -115,7 +115,7 @@ func testAccess(t *testing.T, protocol int, fn accessTestFn) { client.peers.Register(client.rPeer) time.Sleep(time.Millisecond * 10) // ensure that all peerSetNotify callbacks are executed client.rPeer.lock.Lock() - client.rPeer.hasBlock = func(common.Hash, uint64) bool { return true } + client.rPeer.hasBlock = func(common.Hash, uint64, bool) bool { return true } client.rPeer.lock.Unlock() // expect all retrievals to pass test(5) diff --git a/log/term/LICENSE b/log/term/LICENSE deleted file mode 100644 index f090cb42f..000000000 --- a/log/term/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/log/term/terminal_appengine.go b/log/term/terminal_appengine.go deleted file mode 100644 index c1b5d2a3b..000000000 --- a/log/term/terminal_appengine.go +++ /dev/null @@ -1,13 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine - -package term - -// IsTty always returns false on AppEngine. 
-func IsTty(fd uintptr) bool { - return false -} diff --git a/log/term/terminal_darwin.go b/log/term/terminal_darwin.go deleted file mode 100644 index d8f351b1b..000000000 --- a/log/term/terminal_darwin.go +++ /dev/null @@ -1,13 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// +build !appengine - -package term - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/log/term/terminal_freebsd.go b/log/term/terminal_freebsd.go deleted file mode 100644 index cfaceab33..000000000 --- a/log/term/terminal_freebsd.go +++ /dev/null @@ -1,18 +0,0 @@ -package term - -import ( - "syscall" -) - -const ioctlReadTermios = syscall.TIOCGETA - -// Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]uint8 - Ispeed uint32 - Ospeed uint32 -} diff --git a/log/term/terminal_linux.go b/log/term/terminal_linux.go deleted file mode 100644 index 5290468d6..000000000 --- a/log/term/terminal_linux.go +++ /dev/null @@ -1,14 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine - -package term - -import "syscall" - -const ioctlReadTermios = syscall.TCGETS - -type Termios syscall.Termios diff --git a/log/term/terminal_netbsd.go b/log/term/terminal_netbsd.go deleted file mode 100644 index f9bb9e1c2..000000000 --- a/log/term/terminal_netbsd.go +++ /dev/null @@ -1,7 +0,0 @@ -package term - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/log/term/terminal_notwindows.go b/log/term/terminal_notwindows.go deleted file mode 100644 index c9af534f6..000000000 --- a/log/term/terminal_notwindows.go +++ /dev/null @@ -1,20 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux,!appengine darwin freebsd openbsd netbsd - -package term - -import ( - "syscall" - "unsafe" -) - -// IsTty returns true if the given file descriptor is a terminal. -func IsTty(fd uintptr) bool { - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/log/term/terminal_openbsd.go b/log/term/terminal_openbsd.go deleted file mode 100644 index f9bb9e1c2..000000000 --- a/log/term/terminal_openbsd.go +++ /dev/null @@ -1,7 +0,0 @@ -package term - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/log/term/terminal_solaris.go b/log/term/terminal_solaris.go deleted file mode 100644 index 033c16324..000000000 --- a/log/term/terminal_solaris.go +++ /dev/null @@ -1,9 +0,0 @@ -package term - -import "golang.org/x/sys/unix" - -// IsTty returns true if the given file descriptor is a terminal. 
-func IsTty(fd uintptr) bool { - _, err := unix.IoctlGetTermios(int(fd), unix.TCGETA) - return err == nil -} diff --git a/log/term/terminal_windows.go b/log/term/terminal_windows.go deleted file mode 100644 index df3c30c15..000000000 --- a/log/term/terminal_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package term - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTty returns true if the given file descriptor is a terminal. -func IsTty(fd uintptr) bool { - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/signer/core/api.go b/signer/core/api.go index 1da86991e..c380fe977 100644 --- a/signer/core/api.go +++ b/signer/core/api.go @@ -36,6 +36,9 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) +// numberOfAccountsToDerive For hardware wallets, the number of accounts to derive +const numberOfAccountsToDerive = 10 + // ExternalAPI defines the external API through which signing requests are made. type ExternalAPI interface { // List available accounts @@ -79,6 +82,9 @@ type SignerUI interface { // OnSignerStartup is invoked when the signer boots, and tells the UI info about external API location and version // information OnSignerStartup(info StartupInfo) + // OnInputRequired is invoked when clef requires user input, for example master password or + // pin-code for unlocking hardware wallets + OnInputRequired(info UserInputRequest) (UserInputResponse, error) } // SignerAPI defines the actual implementation of ExternalAPI @@ -194,6 +200,14 @@ type ( StartupInfo struct { Info map[string]interface{} `json:"info"` } + UserInputRequest struct { + Prompt string `json:"prompt"` + Title string `json:"title"` + IsPassword bool `json:"isPassword"` + } + UserInputResponse struct { + Text string `json:"text"` + } ) var ErrRequestDenied = errors.New("Request denied") @@ -215,6 +229,9 @@ func NewSignerAPI(chainID int64, ksLocation string, noUSB bool, ui SignerUI, abi if len(ksLocation) > 0 { backends = append(backends, keystore.NewKeyStore(ksLocation, n, p)) } + if advancedMode { + log.Info("Clef is in advanced mode: will warn instead of reject") + } if !noUSB { // Start a USB hub for Ledger hardware wallets if ledgerhub, err := usbwallet.NewLedgerHub(); err != nil { @@ -231,10 +248,94 @@ func NewSignerAPI(chainID int64, ksLocation string, noUSB bool, ui SignerUI, abi log.Debug("Trezor support enabled") } } - if advancedMode { - log.Info("Clef is in advanced mode: will warn instead of reject") + signer := &SignerAPI{big.NewInt(chainID), accounts.NewManager(backends...), ui, NewValidator(abidb), !advancedMode} + if !noUSB { + signer.startUSBListener() + } + return signer +} +func (api *SignerAPI) openTrezor(url accounts.URL) { + resp, err := api.UI.OnInputRequired(UserInputRequest{ + Prompt: "Pin required to open Trezor wallet\n" + + "Look at the device for number positions\n\n" + + "7 | 8 | 9\n" + + "--+---+--\n" + + "4 | 5 | 6\n" + + "--+---+--\n" + + "1 | 2 | 3\n\n", + IsPassword: true, + Title: "Trezor unlock", + }) + if err != nil { + log.Warn("failed getting trezor pin", "err", err) + return + } + // We're using the URL instead of the pointer to the + // Wallet -- perhaps it is
not actually present anymore + w, err := api.am.Wallet(url.String()) + if err != nil { + log.Warn("wallet unavailable", "url", url) + return + } + err = w.Open(resp.Text) + if err != nil { + log.Warn("failed to open wallet", "wallet", url, "err", err) + return } - return &SignerAPI{big.NewInt(chainID), accounts.NewManager(backends...), ui, NewValidator(abidb), !advancedMode} + +} + +// startUSBListener starts a listener for USB events, for hardware wallet interaction +func (api *SignerAPI) startUSBListener() { + events := make(chan accounts.WalletEvent, 16) + am := api.am + am.Subscribe(events) + go func() { + + // Open any wallets already attached + for _, wallet := range am.Wallets() { + if err := wallet.Open(""); err != nil { + log.Warn("Failed to open wallet", "url", wallet.URL(), "err", err) + if err == usbwallet.ErrTrezorPINNeeded { + go api.openTrezor(wallet.URL()) + } + } + } + // Listen for wallet event till termination + for event := range events { + switch event.Kind { + case accounts.WalletArrived: + if err := event.Wallet.Open(""); err != nil { + log.Warn("New wallet appeared, failed to open", "url", event.Wallet.URL(), "err", err) + if err == usbwallet.ErrTrezorPINNeeded { + go api.openTrezor(event.Wallet.URL()) + } + } + case accounts.WalletOpened: + status, _ := event.Wallet.Status() + log.Info("New wallet appeared", "url", event.Wallet.URL(), "status", status) + + derivationPath := accounts.DefaultBaseDerivationPath + if event.Wallet.URL().Scheme == "ledger" { + derivationPath = accounts.DefaultLedgerBaseDerivationPath + } + var nextPath = derivationPath + // Derive first N accounts, hardcoded for now + for i := 0; i < numberOfAccountsToDerive; i++ { + acc, err := event.Wallet.Derive(nextPath, true) + if err != nil { + log.Warn("account derivation failed", "error", err) + } else { + log.Info("derived account", "address", acc.Address) + } + nextPath[len(nextPath)-1]++ + } + case accounts.WalletDropped: + log.Info("Old wallet dropped", "url", event.Wallet.URL()) + event.Wallet.Close() + } + } + }() } // List returns the set of wallet this signer manages. Each wallet can contain diff --git a/signer/core/api_test.go b/signer/core/api_test.go index 70aa9aa94..a8aa23896 100644 --- a/signer/core/api_test.go +++ b/signer/core/api_test.go @@ -19,6 +19,7 @@ package core import ( "bytes" "context" + "errors" "fmt" "io/ioutil" "math/big" @@ -41,6 +42,10 @@ type HeadlessUI struct { controller chan string } +func (ui *HeadlessUI) OnInputRequired(info UserInputRequest) (UserInputResponse, error) { + return UserInputResponse{}, errors.New("not implemented") +} + func (ui *HeadlessUI) OnSignerStartup(info StartupInfo) { } diff --git a/signer/core/cliui.go b/signer/core/cliui.go index cc237612e..940f1f43a 100644 --- a/signer/core/cliui.go +++ b/signer/core/cliui.go @@ -83,6 +83,22 @@ func (ui *CommandlineUI) readPasswordText(inputstring string) string { return string(text) } +func (ui *CommandlineUI) OnInputRequired(info UserInputRequest) (UserInputResponse, error) { + fmt.Println(info.Title) + fmt.Println(info.Prompt) + if info.IsPassword { + text, err := terminal.ReadPassword(int(os.Stdin.Fd())) + if err != nil { + log.Error("Failed to read password", "err", err) + } + fmt.Println("-----------------------") + return UserInputResponse{string(text)}, err + } + text := ui.readString() + fmt.Println("-----------------------") + return UserInputResponse{text}, nil +} + // confirm returns true if user enters 'Yes', otherwise false func (ui *CommandlineUI) confirm() bool { fmt.Printf("Approve? 
[y/N]:\n") diff --git a/signer/core/stdioui.go b/signer/core/stdioui.go index 5640ed03b..64032386f 100644 --- a/signer/core/stdioui.go +++ b/signer/core/stdioui.go @@ -111,3 +111,11 @@ func (ui *StdIOUI) OnSignerStartup(info StartupInfo) { log.Info("Error calling 'OnSignerStartup'", "exc", err.Error(), "info", info) } } +func (ui *StdIOUI) OnInputRequired(info UserInputRequest) (UserInputResponse, error) { + var result UserInputResponse + err := ui.dispatch("OnInputRequired", info, &result) + if err != nil { + log.Info("Error calling 'OnInputRequired'", "exc", err.Error(), "info", info) + } + return result, err +} diff --git a/signer/rules/rules.go b/signer/rules/rules.go index 711e2ddde..07c34db22 100644 --- a/signer/rules/rules.go +++ b/signer/rules/rules.go @@ -194,6 +194,11 @@ func (r *rulesetUI) ApproveImport(request *core.ImportRequest) (core.ImportRespo return r.next.ApproveImport(request) } +// OnInputRequired not handled by rules +func (r *rulesetUI) OnInputRequired(info core.UserInputRequest) (core.UserInputResponse, error) { + return r.next.OnInputRequired(info) +} + func (r *rulesetUI) ApproveListing(request *core.ListRequest) (core.ListResponse, error) { jsonreq, err := json.Marshal(request) approved, err := r.checkApproval("ApproveListing", jsonreq, err) @@ -222,6 +227,7 @@ func (r *rulesetUI) ShowInfo(message string) { log.Info(message) r.next.ShowInfo(message) } + func (r *rulesetUI) OnSignerStartup(info core.StartupInfo) { jsonInfo, err := json.Marshal(info) if err != nil { diff --git a/signer/rules/rules_test.go b/signer/rules/rules_test.go index b6060eba7..c2f92d51f 100644 --- a/signer/rules/rules_test.go +++ b/signer/rules/rules_test.go @@ -74,6 +74,10 @@ func mixAddr(a string) (*common.MixedcaseAddress, error) { type alwaysDenyUI struct{} +func (alwaysDenyUI) OnInputRequired(info core.UserInputRequest) (core.UserInputResponse, error) { + return core.UserInputResponse{}, nil +} + func (alwaysDenyUI) OnSignerStartup(info core.StartupInfo) { } @@ -200,6 +204,11 @@ type dummyUI struct { calls []string } +func (d *dummyUI) OnInputRequired(info core.UserInputRequest) (core.UserInputResponse, error) { + d.calls = append(d.calls, "OnInputRequired") + return core.UserInputResponse{}, nil +} + func (d *dummyUI) ApproveTx(request *core.SignTxRequest) (core.SignTxResponse, error) { d.calls = append(d.calls, "ApproveTx") return core.SignTxResponse{}, core.ErrRequestDenied @@ -509,6 +518,11 @@ type dontCallMe struct { t *testing.T } +func (d *dontCallMe) OnInputRequired(info core.UserInputRequest) (core.UserInputResponse, error) { + d.t.Fatalf("Did not expect next-handler to be called") + return core.UserInputResponse{}, nil +} + func (d *dontCallMe) OnSignerStartup(info core.StartupInfo) { } diff --git a/swarm/api/api.go b/swarm/api/api.go index d7b6d8419..7b8f04c13 100644 --- a/swarm/api/api.go +++ b/swarm/api/api.go @@ -16,6 +16,9 @@ package api +//go:generate mimegen --types=./../../cmd/swarm/mimegen/mime.types --package=api --out=gen_mime.go +//go:generate gofmt -s -w gen_mime.go + import ( "archive/tar" "context" @@ -43,7 +46,8 @@ import ( "github.com/ethereum/go-ethereum/swarm/spancontext" "github.com/ethereum/go-ethereum/swarm/storage" "github.com/ethereum/go-ethereum/swarm/storage/mru" - opentracing "github.com/opentracing/opentracing-go" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" + "github.com/opentracing/opentracing-go" ) var ( @@ -401,77 +405,54 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage // we need to do some 
extra work if this is a mutable resource manifest if entry.ContentType == ResourceContentType { - - // get the resource rootAddr - log.Trace("resource type", "menifestAddr", manifestAddr, "hash", entry.Hash) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - rootAddr := storage.Address(common.FromHex(entry.Hash)) - rsrc, err := a.resource.Load(ctx, rootAddr) + if entry.ResourceView == nil { + return reader, mimeType, status, nil, fmt.Errorf("Cannot decode ResourceView in manifest") + } + _, err := a.resource.Lookup(ctx, mru.NewQueryLatest(entry.ResourceView, lookup.NoClue)) if err != nil { apiGetNotFound.Inc(1) status = http.StatusNotFound log.Debug(fmt.Sprintf("get resource content error: %v", err)) return reader, mimeType, status, nil, err } + // get the data of the update + _, rsrcData, err := a.resource.GetContent(entry.ResourceView) + if err != nil { + apiGetNotFound.Inc(1) + status = http.StatusNotFound + log.Warn(fmt.Sprintf("get resource content error: %v", err)) + return reader, mimeType, status, nil, err + } - // use this key to retrieve the latest update - params := mru.LookupLatest(rootAddr) - rsrc, err = a.resource.Lookup(ctx, params) + // extract multihash + decodedMultihash, err := multihash.FromMultihash(rsrcData) + if err != nil { + apiGetInvalid.Inc(1) + status = http.StatusUnprocessableEntity + log.Warn("invalid resource multihash", "err", err) + return reader, mimeType, status, nil, err + } + manifestAddr = storage.Address(decodedMultihash) + log.Trace("resource is multihash", "key", manifestAddr) + + // get the manifest the multihash digest points to + trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, NOOPDecrypt) if err != nil { apiGetNotFound.Inc(1) status = http.StatusNotFound - log.Debug(fmt.Sprintf("get resource content error: %v", err)) + log.Warn(fmt.Sprintf("loadManifestTrie (resource multihash) error: %v", err)) return reader, mimeType, status, nil, err } - // if it's multihash, we will transparently serve the content this multihash points to - // \TODO this resolve is rather expensive all in all, review to see if it can be achieved cheaper - if rsrc.Multihash() { - - // get the data of the update - _, rsrcData, err := a.resource.GetContent(rootAddr) - if err != nil { - apiGetNotFound.Inc(1) - status = http.StatusNotFound - log.Warn(fmt.Sprintf("get resource content error: %v", err)) - return reader, mimeType, status, nil, err - } - - // validate that data as multihash - decodedMultihash, err := multihash.FromMultihash(rsrcData) - if err != nil { - apiGetInvalid.Inc(1) - status = http.StatusUnprocessableEntity - log.Warn("invalid resource multihash", "err", err) - return reader, mimeType, status, nil, err - } - manifestAddr = storage.Address(decodedMultihash) - log.Trace("resource is multihash", "key", manifestAddr) - - // get the manifest the multihash digest points to - trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, decrypt) - if err != nil { - apiGetNotFound.Inc(1) - status = http.StatusNotFound - log.Warn(fmt.Sprintf("loadManifestTrie (resource multihash) error: %v", err)) - return reader, mimeType, status, nil, err - } - - // finally, get the manifest entry - // it will always be the entry on path "" - entry, _ = trie.getEntry(path) - if entry == nil { - status = http.StatusNotFound - apiGetNotFound.Inc(1) - err = fmt.Errorf("manifest (resource multihash) entry for '%s' not found", path) - log.Trace("manifest (resource multihash) entry not found", "key", manifestAddr, "path", path) - return reader, 
mimeType, status, nil, err - } - - } else { - // data is returned verbatim since it's not a multihash - return rsrc, "application/octet-stream", http.StatusOK, nil, nil + // finally, get the manifest entry + // it will always be the entry on path "" + entry, _ = trie.getEntry(path) + if entry == nil { + status = http.StatusNotFound + apiGetNotFound.Inc(1) + err = fmt.Errorf("manifest (resource multihash) entry for '%s' not found", path) + log.Trace("manifest (resource multihash) entry not found", "key", manifestAddr, "path", path) + return reader, mimeType, status, nil, err } } @@ -778,9 +759,14 @@ func (a *API) UploadTar(ctx context.Context, bodyReader io.ReadCloser, manifestP // add the entry under the path from the request manifestPath := path.Join(manifestPath, hdr.Name) + contentType := hdr.Xattrs["user.swarm.content-type"] + if contentType == "" { + contentType = mime.TypeByExtension(filepath.Ext(hdr.Name)) + } + //DetectContentType("") entry := &ManifestEntry{ Path: manifestPath, - ContentType: hdr.Xattrs["user.swarm.content-type"], + ContentType: contentType, Mode: hdr.Mode, Size: hdr.Size, ModTime: hdr.ModTime, @@ -791,10 +777,15 @@ func (a *API) UploadTar(ctx context.Context, bodyReader io.ReadCloser, manifestP return nil, fmt.Errorf("error adding manifest entry from tar stream: %s", err) } if hdr.Name == defaultPath { + contentType := hdr.Xattrs["user.swarm.content-type"] + if contentType == "" { + contentType = mime.TypeByExtension(filepath.Ext(hdr.Name)) + } + entry := &ManifestEntry{ Hash: contentKey.Hex(), Path: "", // default entry - ContentType: hdr.Xattrs["user.swarm.content-type"], + ContentType: contentType, Mode: hdr.Mode, Size: hdr.Size, ModTime: hdr.ModTime, @@ -966,37 +957,27 @@ func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver } // ResourceLookup finds mutable resource updates at specific periods and versions -func (a *API) ResourceLookup(ctx context.Context, params *mru.LookupParams) (string, []byte, error) { - var err error - rsrc, err := a.resource.Load(ctx, params.RootAddr()) +func (a *API) ResourceLookup(ctx context.Context, query *mru.Query) ([]byte, error) { + _, err := a.resource.Lookup(ctx, query) if err != nil { - return "", nil, err - } - _, err = a.resource.Lookup(ctx, params) - if err != nil { - return "", nil, err + return nil, err } var data []byte - _, data, err = a.resource.GetContent(params.RootAddr()) + _, data, err = a.resource.GetContent(&query.View) if err != nil { - return "", nil, err + return nil, err } - return rsrc.Name(), data, nil -} - -// Create Mutable resource -func (a *API) ResourceCreate(ctx context.Context, request *mru.Request) error { - return a.resource.New(ctx, request) + return data, nil } // ResourceNewRequest creates a Request object to update a specific mutable resource -func (a *API) ResourceNewRequest(ctx context.Context, rootAddr storage.Address) (*mru.Request, error) { - return a.resource.NewUpdateRequest(ctx, rootAddr) +func (a *API) ResourceNewRequest(ctx context.Context, view *mru.View) (*mru.Request, error) { + return a.resource.NewRequest(ctx, view) } // ResourceUpdate updates a Mutable Resource with arbitrary data. // Upon retrieval the update will be retrieved verbatim as bytes. 
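The tar-upload changes above prefer the `user.swarm.content-type` xattr and fall back to an extension lookup when it is missing. A small sketch of that fallback using only the standard library (`tarEntryContentType` is a hypothetical helper, not code from this change):

```golang
package main

import (
	"fmt"
	"mime"
	"path/filepath"
)

// tarEntryContentType prefers an explicit content type carried in the tar
// entry's xattrs and otherwise guesses from the file extension.
func tarEntryContentType(name string, xattrs map[string]string) string {
	if ct := xattrs["user.swarm.content-type"]; ct != "" {
		return ct
	}
	return mime.TypeByExtension(filepath.Ext(name))
}

func main() {
	// Typically prints "text/css; charset=utf-8" from Go's built-in table.
	fmt.Println(tarEntryContentType("index.css", nil))
}
```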
-func (a *API) ResourceUpdate(ctx context.Context, request *mru.SignedResourceUpdate) (storage.Address, error) { +func (a *API) ResourceUpdate(ctx context.Context, request *mru.Request) (storage.Address, error) { return a.resource.Update(ctx, request) } @@ -1005,17 +986,91 @@ func (a *API) ResourceHashSize() int { return a.resource.HashSize } -// ResolveResourceManifest retrieves the Mutable Resource manifest for the given address, and returns the address of the metadata chunk. -func (a *API) ResolveResourceManifest(ctx context.Context, addr storage.Address) (storage.Address, error) { +// ErrCannotLoadResourceManifest is returned when looking up a resource manifest fails +var ErrCannotLoadResourceManifest = errors.New("Cannot load resource manifest") + +// ErrNotAResourceManifest is returned when the address provided returned something other than a valid manifest +var ErrNotAResourceManifest = errors.New("Not a resource manifest") + +// ResolveResourceManifest retrieves the Mutable Resource manifest for the given address, and returns the Resource's view ID. +func (a *API) ResolveResourceManifest(ctx context.Context, addr storage.Address) (*mru.View, error) { trie, err := loadManifest(ctx, a.fileStore, addr, nil, NOOPDecrypt) if err != nil { - return nil, fmt.Errorf("cannot load resource manifest: %v", err) + return nil, ErrCannotLoadResourceManifest } entry, _ := trie.getEntry("") if entry.ContentType != ResourceContentType { - return nil, fmt.Errorf("not a resource manifest: %s", addr) + return nil, ErrNotAResourceManifest + } + + return entry.ResourceView, nil +} + +// ErrCannotResolveResourceURI is returned when the ENS resolver is not able to translate a name to a resource +var ErrCannotResolveResourceURI = errors.New("Cannot resolve Resource URI") + +// ErrCannotResolveResourceView is returned when values provided are not enough or invalid to recreate a +// resource view out of them. +var ErrCannotResolveResourceView = errors.New("Cannot resolve resource view") + +// ResolveResourceView attempts to extract View information out of the manifest, if provided +// If not, it attempts to extract the View out of a set of key-value pairs +func (a *API) ResolveResourceView(ctx context.Context, uri *URI, values mru.Values) (*mru.View, error) { + var view *mru.View + var err error + if uri.Addr != "" { + // resolve the content key. 
+ manifestAddr := uri.Address() + if manifestAddr == nil { + manifestAddr, err = a.Resolve(ctx, uri.Addr) + if err != nil { + return nil, ErrCannotResolveResourceURI + } + } + + // get the resource view from the manifest + view, err = a.ResolveResourceManifest(ctx, manifestAddr) + if err != nil { + return nil, err + } + log.Debug("handle.get.resource: resolved", "manifestkey", manifestAddr, "view", view.Hex()) + } else { + var v mru.View + if err := v.FromValues(values); err != nil { + return nil, ErrCannotResolveResourceView + + } + view = &v + } + return view, nil +} + +// MimeOctetStream default value of http Content-Type header +const MimeOctetStream = "application/octet-stream" + +// DetectContentType by file extension, or fallback to content sniff +func DetectContentType(fileName string, f io.ReadSeeker) (string, error) { + ctype := mime.TypeByExtension(filepath.Ext(fileName)) + if ctype != "" { + return ctype, nil + } + + // save/rollback to get content probe from the beginning of the file + currentPosition, err := f.Seek(0, io.SeekCurrent) + if err != nil { + return MimeOctetStream, fmt.Errorf("seeker can't seek, %s", err) + } + + // read a chunk to decide between utf-8 text and binary + var buf [512]byte + n, _ := f.Read(buf[:]) + ctype = http.DetectContentType(buf[:n]) + + _, err = f.Seek(currentPosition, io.SeekStart) // rewind to output whole file + if err != nil { + return MimeOctetStream, fmt.Errorf("seeker can't seek, %s", err) } - return storage.Address(common.FromHex(entry.Hash)), nil + return ctype, nil } diff --git a/swarm/api/api_test.go b/swarm/api/api_test.go index a65bf07e2..eb896f32a 100644 --- a/swarm/api/api_test.go +++ b/swarm/api/api_test.go @@ -17,6 +17,7 @@ package api import ( + "bytes" "context" "errors" "flag" @@ -433,3 +434,69 @@ func TestDecryptOrigin(t *testing.T) { } } } + +func TestDetectContentType(t *testing.T) { + for _, tc := range []struct { + file string + content string + expectedContentType string + }{ + { + file: "file-with-correct-css.css", + content: "body {background-color: orange}", + expectedContentType: "text/css; charset=utf-8", + }, + { + file: "empty-file.css", + content: "", + expectedContentType: "text/css; charset=utf-8", + }, + { + file: "empty-file.pdf", + content: "", + expectedContentType: "application/pdf", + }, + { + file: "empty-file.md", + content: "", + expectedContentType: "text/markdown; charset=utf-8", + }, + { + file: "empty-file-with-unknown-content.strangeext", + content: "", + expectedContentType: "text/plain; charset=utf-8", + }, + { + file: "file-with-unknown-extension-and-content.strangeext", + content: "Lorem Ipsum", + expectedContentType: "text/plain; charset=utf-8", + }, + { + file: "file-no-extension", + content: "Lorem Ipsum", + expectedContentType: "text/plain; charset=utf-8", + }, + { + file: "file-no-extension-no-content", + content: "", + expectedContentType: "text/plain; charset=utf-8", + }, + { + file: "css-file-with-html-inside.css", + content: "<!doctype html><html><head></head><body></body></html>", + expectedContentType: "text/css; charset=utf-8", + }, + } { + t.Run(tc.file, func(t *testing.T) { + detected, err := DetectContentType(tc.file, bytes.NewReader([]byte(tc.content))) + if err != nil { + t.Fatal(err) + } + + if detected != tc.expectedContentType { + t.Fatalf("File: %s, Expected mime type %s, got %s", tc.file, tc.expectedContentType, detected) + } + + }) + } +} diff --git a/swarm/api/client/client.go b/swarm/api/client/client.go index 3d06e9e1c..47a6980de 100644 --- a/swarm/api/client/client.go
+++ b/swarm/api/client/client.go @@ -24,10 +24,10 @@ import ( "fmt" "io" "io/ioutil" - "mime" "mime/multipart" "net/http" "net/textproto" + "net/url" "os" "path/filepath" "regexp" @@ -123,10 +123,16 @@ func Open(path string) (*File, error) { f.Close() return nil, err } + + contentType, err := api.DetectContentType(f.Name(), f) + if err != nil { + return nil, err + } + return &File{ ReadCloser: f, ManifestEntry: api.ManifestEntry{ - ContentType: mime.TypeByExtension(filepath.Ext(path)), + ContentType: contentType, Mode: int64(stat.Mode()), Size: stat.Size(), ModTime: stat.ModTime(), @@ -595,13 +601,16 @@ func (c *Client) MultipartUpload(hash string, uploader Uploader) (string, error) return string(data), nil } +// ErrNoResourceUpdatesFound is returned when Swarm cannot find updates of the given resource +var ErrNoResourceUpdatesFound = errors.New("No updates found for this resource") + // CreateResource creates a Mutable Resource with the given name and frequency, initializing it with the provided // data. Data is interpreted as multihash or not depending on the multihash parameter. // startTime=0 means "now" // Returns the resulting Mutable Resource manifest address that you can use to include in an ENS Resolver (setContent) // or reference future updates (Client.UpdateResource) func (c *Client) CreateResource(request *mru.Request) (string, error) { - responseStream, err := c.updateResource(request) + responseStream, err := c.updateResource(request, true) if err != nil { return "", err } @@ -621,17 +630,24 @@ func (c *Client) CreateResource(request *mru.Request) (string, error) { // UpdateResource allows you to set a new version of your content func (c *Client) UpdateResource(request *mru.Request) error { - _, err := c.updateResource(request) + _, err := c.updateResource(request, false) return err } -func (c *Client) updateResource(request *mru.Request) (io.ReadCloser, error) { - body, err := request.MarshalJSON() +func (c *Client) updateResource(request *mru.Request, createManifest bool) (io.ReadCloser, error) { + URL, err := url.Parse(c.Gateway) if err != nil { return nil, err } + URL.Path = "/bzz-resource:/" + values := URL.Query() + body := request.AppendValues(values) + if createManifest { + values.Set("manifest", "1") + } + URL.RawQuery = values.Encode() - req, err := http.NewRequest("POST", c.Gateway+"/bzz-resource:/", bytes.NewBuffer(body)) + req, err := http.NewRequest("POST", URL.String(), bytes.NewBuffer(body)) if err != nil { return nil, err } @@ -642,28 +658,61 @@ func (c *Client) updateResource(request *mru.Request) (io.ReadCloser, error) { } return res.Body, nil - } // GetResource returns a byte stream with the raw content of the resource // manifestAddressOrDomain is the address you obtained in CreateResource or an ENS domain whose Resolver // points to that address -func (c *Client) GetResource(manifestAddressOrDomain string) (io.ReadCloser, error) { +func (c *Client) GetResource(query *mru.Query, manifestAddressOrDomain string) (io.ReadCloser, error) { + return c.getResource(query, manifestAddressOrDomain, false) +} - res, err := http.Get(c.Gateway + "/bzz-resource:/" + manifestAddressOrDomain) +// getResource returns a byte stream with the raw content of the resource // manifestAddressOrDomain is the address you obtained in CreateResource or an ENS domain whose Resolver // points to that address // meta set to true will instruct the node to return resource metainformation instead +func (c *Client) getResource(query *mru.Query, manifestAddressOrDomain string, meta bool)
(io.ReadCloser, error) { + URL, err := url.Parse(c.Gateway) if err != nil { return nil, err } - return res.Body, nil + URL.Path = "/bzz-resource:/" + manifestAddressOrDomain + values := URL.Query() + if query != nil { + query.AppendValues(values) //adds query parameters + } + if meta { + values.Set("meta", "1") + } + URL.RawQuery = values.Encode() + res, err := http.Get(URL.String()) + if err != nil { + return nil, err + } + + if res.StatusCode != http.StatusOK { + if res.StatusCode == http.StatusNotFound { + return nil, ErrNoResourceUpdatesFound + } + errorMessageBytes, err := ioutil.ReadAll(res.Body) + var errorMessage string + if err != nil { + errorMessage = "cannot retrieve error message: " + err.Error() + } else { + errorMessage = string(errorMessageBytes) + } + return nil, fmt.Errorf("Error retrieving resource: %s", errorMessage) + } + return res.Body, nil } // GetResourceMetadata returns a structure that describes the Mutable Resource // manifestAddressOrDomain is the address you obtained in CreateResource or an ENS domain whose Resolver // points to that address -func (c *Client) GetResourceMetadata(manifestAddressOrDomain string) (*mru.Request, error) { +func (c *Client) GetResourceMetadata(query *mru.Query, manifestAddressOrDomain string) (*mru.Request, error) { - responseStream, err := c.GetResource(manifestAddressOrDomain + "/meta") + responseStream, err := c.getResource(query, manifestAddressOrDomain, true) if err != nil { return nil, err } diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go index f9312d48f..02980de1d 100644 --- a/swarm/api/client/client_test.go +++ b/swarm/api/client/client_test.go @@ -25,6 +25,8 @@ import ( "sort" "testing" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/swarm/api" @@ -391,19 +393,12 @@ func TestClientCreateResourceMultihash(t *testing.T) { s := common.FromHex(swarmHash) mh := multihash.ToMultihash(s) - // our mutable resource "name" - resourceName := "foo.eth" + // our mutable resource topic + topic, _ := mru.NewTopic("foo.eth", nil) - createRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{ - Name: resourceName, - Frequency: 13, - StartTime: srv.GetCurrentTime(), - Owner: signer.Address(), - }) - if err != nil { - t.Fatal(err) - } - createRequest.SetData(mh, true) + createRequest := mru.NewFirstRequest(topic) + + createRequest.SetData(mh) if err := createRequest.Sign(signer); err != nil { t.Fatalf("Error signing update: %s", err) } @@ -414,12 +409,18 @@ func TestClientCreateResourceMultihash(t *testing.T) { t.Fatalf("Error creating resource: %s", err) } - correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e" + correctManifestAddrHex := "6ef40ba1492cf2a029dc9a8b5896c822cf689d3cd010842f4f1744e6db8824bd" if resourceManifestHash != correctManifestAddrHex { - t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash) + t.Fatalf("Response resource manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash) } - reader, err := client.GetResource(correctManifestAddrHex) + // Check we get a not found error when trying to get the resource with a made-up manifest + _, err = client.GetResource(nil, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") + if err != ErrNoResourceUpdatesFound { + t.Fatalf("Expected to receive 
ErrNoResourceUpdatesFound error. Got: %s", err) + } + + reader, err := client.GetResource(nil, correctManifestAddrHex) if err != nil { t.Fatalf("Error retrieving resource: %s", err) } @@ -447,30 +448,22 @@ func TestClientCreateUpdateResource(t *testing.T) { databytes := []byte("En un lugar de La Mancha, de cuyo nombre no quiero acordarme...") // our mutable resource name - resourceName := "El Quijote" + topic, _ := mru.NewTopic("El Quijote", nil) + createRequest := mru.NewFirstRequest(topic) - createRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{ - Name: resourceName, - Frequency: 13, - StartTime: srv.GetCurrentTime(), - Owner: signer.Address(), - }) - if err != nil { - t.Fatal(err) - } - createRequest.SetData(databytes, false) + createRequest.SetData(databytes) if err := createRequest.Sign(signer); err != nil { t.Fatalf("Error signing update: %s", err) } resourceManifestHash, err := client.CreateResource(createRequest) - correctManifestAddrHex := "cc7904c17b49f9679e2d8006fe25e87e3f5c2072c2b49cab50f15e544471b30a" + correctManifestAddrHex := "fcb8e75f53e480e197c083ad1976d265674d0ce776f2bf359c09c413fb5230b8" if resourceManifestHash != correctManifestAddrHex { - t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash) + t.Fatalf("Response resource manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash) } - reader, err := client.GetResource(correctManifestAddrHex) + reader, err := client.GetResource(nil, correctManifestAddrHex) if err != nil { t.Fatalf("Error retrieving resource: %s", err) } @@ -486,12 +479,12 @@ func TestClientCreateUpdateResource(t *testing.T) { // define different data databytes = []byte("... no ha mucho tiempo que vivía un hidalgo de los de lanza en astillero ...") - updateRequest, err := client.GetResourceMetadata(correctManifestAddrHex) + updateRequest, err := client.GetResourceMetadata(nil, correctManifestAddrHex) if err != nil { t.Fatalf("Error retrieving update request template: %s", err) } - updateRequest.SetData(databytes, false) + updateRequest.SetData(databytes) if err := updateRequest.Sign(signer); err != nil { t.Fatalf("Error signing update: %s", err) } @@ -500,7 +493,7 @@ func TestClientCreateUpdateResource(t *testing.T) { t.Fatalf("Error updating resource: %s", err) } - reader, err = client.GetResource(correctManifestAddrHex) + reader, err = client.GetResource(nil, correctManifestAddrHex) if err != nil { t.Fatalf("Error retrieving resource: %s", err) } @@ -513,4 +506,24 @@ func TestClientCreateUpdateResource(t *testing.T) { t.Fatalf("Expected: %v, got %v", databytes, gotData) } + // now try retrieving resource without a manifest + + view := &mru.View{ + Topic: topic, + User: signer.Address(), + } + + lookupParams := mru.NewQueryLatest(view, lookup.NoClue) + reader, err = client.GetResource(lookupParams, "") + if err != nil { + t.Fatalf("Error retrieving resource: %s", err) + } + defer reader.Close() + gotData, err = ioutil.ReadAll(reader) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(databytes, gotData) { + t.Fatalf("Expected: %v, got %v", databytes, gotData) + } } diff --git a/swarm/api/config.go b/swarm/api/config.go index e753890e4..be7385408 100644 --- a/swarm/api/config.go +++ b/swarm/api/config.go @@ -50,26 +50,27 @@ type Config struct { Swap *swap.LocalProfile Pss *pss.PssParams //*network.SyncParams - Contract common.Address - EnsRoot common.Address - EnsAPIs []string - Path string - ListenAddr string - Port string - PublicKey string -
BzzKey string - NodeID string - NetworkID uint64 - SwapEnabled bool - SyncEnabled bool - SyncingSkipCheck bool - DeliverySkipCheck bool - LightNodeEnabled bool - SyncUpdateDelay time.Duration - SwapAPI string - Cors string - BzzAccount string - privateKey *ecdsa.PrivateKey + Contract common.Address + EnsRoot common.Address + EnsAPIs []string + Path string + ListenAddr string + Port string + PublicKey string + BzzKey string + NodeID string + NetworkID uint64 + SwapEnabled bool + SyncEnabled bool + SyncingSkipCheck bool + DeliverySkipCheck bool + MaxStreamPeerServers int + LightNodeEnabled bool + SyncUpdateDelay time.Duration + SwapAPI string + Cors string + BzzAccount string + privateKey *ecdsa.PrivateKey } //create a default config with all parameters to set to defaults @@ -80,20 +81,21 @@ func NewConfig() (c *Config) { FileStoreParams: storage.NewFileStoreParams(), HiveParams: network.NewHiveParams(), //SyncParams: network.NewDefaultSyncParams(), - Swap: swap.NewDefaultSwapParams(), - Pss: pss.NewPssParams(), - ListenAddr: DefaultHTTPListenAddr, - Port: DefaultHTTPPort, - Path: node.DefaultDataDir(), - EnsAPIs: nil, - EnsRoot: ens.TestNetAddress, - NetworkID: network.DefaultNetworkID, - SwapEnabled: false, - SyncEnabled: true, - SyncingSkipCheck: false, - DeliverySkipCheck: true, - SyncUpdateDelay: 15 * time.Second, - SwapAPI: "", + Swap: swap.NewDefaultSwapParams(), + Pss: pss.NewPssParams(), + ListenAddr: DefaultHTTPListenAddr, + Port: DefaultHTTPPort, + Path: node.DefaultDataDir(), + EnsAPIs: nil, + EnsRoot: ens.TestNetAddress, + NetworkID: network.DefaultNetworkID, + SwapEnabled: false, + SyncEnabled: true, + SyncingSkipCheck: false, + MaxStreamPeerServers: 10000, + DeliverySkipCheck: true, + SyncUpdateDelay: 15 * time.Second, + SwapAPI: "", } return diff --git a/swarm/api/filesystem.go b/swarm/api/filesystem.go index 8251ebc4d..43695efc1 100644 --- a/swarm/api/filesystem.go +++ b/swarm/api/filesystem.go @@ -21,7 +21,6 @@ import ( "context" "fmt" "io" - "net/http" "os" "path" "path/filepath" @@ -97,51 +96,50 @@ func (fs *FileSystem) Upload(lpath, index string, toEncrypt bool) (string, error list = append(list, entry) } - cnt := len(list) - errors := make([]error, cnt) - done := make(chan bool, maxParallelFiles) - dcnt := 0 - awg := &sync.WaitGroup{} + errors := make([]error, len(list)) + sem := make(chan bool, maxParallelFiles) + defer close(sem) for i, entry := range list { - if i >= dcnt+maxParallelFiles { - <-done - dcnt++ - } - awg.Add(1) - go func(i int, entry *manifestTrieEntry, done chan bool) { + sem <- true + go func(i int, entry *manifestTrieEntry) { + defer func() { <-sem }() + f, err := os.Open(entry.Path) - if err == nil { - stat, _ := f.Stat() - var hash storage.Address - var wait func(context.Context) error - ctx := context.TODO() - hash, wait, err = fs.api.fileStore.Store(ctx, f, stat.Size(), toEncrypt) - if hash != nil { - list[i].Hash = hash.Hex() - } - err = wait(ctx) - awg.Done() - if err == nil { - first512 := make([]byte, 512) - fread, _ := f.ReadAt(first512, 0) - if fread > 0 { - mimeType := http.DetectContentType(first512[:fread]) - if filepath.Ext(entry.Path) == ".css" { - mimeType = "text/css" - } - list[i].ContentType = mimeType - } - } - f.Close() + if err != nil { + errors[i] = err + return + } + defer f.Close() + + stat, err := f.Stat() + if err != nil { + errors[i] = err + return + } + + var hash storage.Address + var wait func(context.Context) error + ctx := context.TODO() + hash, wait, err = fs.api.fileStore.Store(ctx, f, stat.Size(), toEncrypt) + if 
hash != nil { + list[i].Hash = hash.Hex() } - errors[i] = err - done <- true - }(i, entry, done) + if err := wait(ctx); err != nil { + errors[i] = err + return + } + + list[i].ContentType, err = DetectContentType(f.Name(), f) + if err != nil { + errors[i] = err + return + } + + }(i, entry) } - for dcnt < cnt { - <-done - dcnt++ + for i := 0; i < cap(sem); i++ { + sem <- true } trie := &manifestTrie{ @@ -168,7 +166,6 @@ func (fs *FileSystem) Upload(lpath, index string, toEncrypt bool) (string, error if err2 == nil { hs = trie.ref.Hex() } - awg.Wait() return hs, err2 } diff --git a/swarm/api/filesystem_test.go b/swarm/api/filesystem_test.go index fe7527b1f..02f5bff65 100644 --- a/swarm/api/filesystem_test.go +++ b/swarm/api/filesystem_test.go @@ -60,7 +60,7 @@ func TestApiDirUpload0(t *testing.T) { content = readPath(t, "testdata", "test0", "index.css") resp = testGet(t, api, bzzhash, "index.css") - exp = expResponse(content, "text/css", 0) + exp = expResponse(content, "text/css; charset=utf-8", 0) checkResponse(t, resp, exp) addr := storage.Address(common.Hex2Bytes(bzzhash)) @@ -140,7 +140,7 @@ func TestApiDirUploadModify(t *testing.T) { content = readPath(t, "testdata", "test0", "index.css") resp = testGet(t, api, bzzhash, "index.css") - exp = expResponse(content, "text/css", 0) + exp = expResponse(content, "text/css; charset=utf-8", 0) checkResponse(t, resp, exp) _, _, _, _, err = api.Get(context.TODO(), nil, addr, "") diff --git a/swarm/api/gen_mime.go b/swarm/api/gen_mime.go new file mode 100644 index 000000000..109edeb50 --- /dev/null +++ b/swarm/api/gen_mime.go @@ -0,0 +1,1201 @@ +// Code generated by github.com/ethereum/go-ethereum/cmd/swarm/mimegen. DO NOT EDIT. + +package api + +import "mime" + +func init() { + var mimeTypes = map[string]string{ + ".a2l": "application/A2L", + ".aml": "application/AML", + ".ez": "application/andrew-inset", + ".atf": "application/ATF", + ".atfx": "application/ATFX", + ".atxml": "application/ATXML", + ".atom": "application/atom+xml", + ".atomcat": "application/atomcat+xml", + ".atomdeleted": "application/atomdeleted+xml", + ".atomsvc": "application/atomsvc+xml", + ".apxml": "application/auth-policy+xml", + ".xdd": "application/bacnet-xdd+zip", + ".xcs": "application/calendar+xml", + ".cbor": "application/cbor", + ".ccmp": "application/ccmp+xml", + ".ccxml": "application/ccxml+xml", + ".cdfx": "application/CDFX+XML", + ".cdmia": "application/cdmi-capability", + ".cdmic": "application/cdmi-container", + ".cdmid": "application/cdmi-domain", + ".cdmio": "application/cdmi-object", + ".cdmiq": "application/cdmi-queue", + ".cea": "application/CEA", + ".cellml": "application/cellml+xml", + ".cml": "application/cellml+xml", + ".clue": "application/clue_info+xml", + ".cmsc": "application/cms", + ".cpl": "application/cpl+xml", + ".csrattrs": "application/csrattrs", + ".mpd": "application/dash+xml", + ".mpdd": "application/dashdelta", + ".davmount": "application/davmount+xml", + ".dcd": "application/DCD", + ".dcm": "application/dicom", + ".dii": "application/DII", + ".dit": "application/DIT", + ".xmls": "application/dskpp+xml", + ".dssc": "application/dssc+der", + ".xdssc": "application/dssc+xml", + ".dvc": "application/dvcs", + ".es": "application/ecmascript", + ".efi": "application/efi", + ".emma": "application/emma+xml", + ".emotionml": "application/emotionml+xml", + ".epub": "application/epub+zip", + ".exi": "application/exi", + ".finf": "application/fastinfoset", + ".fdt": "application/fdt+xml", + ".pfr": "application/font-tdpfr", + ".geojson": 
"application/geo+json", + ".gml": "application/gml+xml", + ".gz": "application/gzip", + ".tgz": "application/gzip", + ".stk": "application/hyperstudio", + ".ink": "application/inkml+xml", + ".inkml": "application/inkml+xml", + ".ipfix": "application/ipfix", + ".its": "application/its+xml", + ".js": "application/javascript", + ".jrd": "application/jrd+json", + ".json": "application/json", + ".json-patch": "application/json-patch+json", + ".jsonld": "application/ld+json", + ".lgr": "application/lgr+xml", + ".wlnk": "application/link-format", + ".lostxml": "application/lost+xml", + ".lostsyncxml": "application/lostsync+xml", + ".lxf": "application/LXF", + ".hqx": "application/mac-binhex40", + ".mads": "application/mads+xml", + ".mrc": "application/marc", + ".mrcx": "application/marcxml+xml", + ".nb": "application/mathematica", + ".ma": "application/mathematica", + ".mb": "application/mathematica", + ".mml": "application/mathml+xml", + ".mbox": "application/mbox", + ".meta4": "application/metalink4+xml", + ".mets": "application/mets+xml", + ".mf4": "application/MF4", + ".mods": "application/mods+xml", + ".m21": "application/mp21", + ".mp21": "application/mp21", + ".doc": "application/msword", + ".mxf": "application/mxf", + ".nq": "application/n-quads", + ".nt": "application/n-triples", + ".orq": "application/ocsp-request", + ".ors": "application/ocsp-response", + ".bin": "application/octet-stream", + ".lha": "application/octet-stream", + ".lzh": "application/octet-stream", + ".exe": "application/octet-stream", + ".class": "application/octet-stream", + ".so": "application/octet-stream", + ".dll": "application/octet-stream", + ".img": "application/octet-stream", + ".iso": "application/octet-stream", + ".oda": "application/oda", + ".odx": "application/ODX", + ".opf": "application/oebps-package+xml", + ".ogx": "application/ogg", + ".oxps": "application/oxps", + ".relo": "application/p2p-overlay+xml", + ".pdf": "application/pdf", + ".pdx": "application/PDX", + ".pgp": "application/pgp-encrypted", + ".sig": "application/pgp-signature", + ".p10": "application/pkcs10", + ".p12": "application/pkcs12", + ".pfx": "application/pkcs12", + ".p7m": "application/pkcs7-mime", + ".p7c": "application/pkcs7-mime", + ".p7s": "application/pkcs7-signature", + ".p8": "application/pkcs8", + ".cer": "application/pkix-cert", + ".crl": "application/pkix-crl", + ".pkipath": "application/pkix-pkipath", + ".pki": "application/pkixcmp", + ".pls": "application/pls+xml", + ".ps": "application/postscript", + ".eps": "application/postscript", + ".ai": "application/postscript", + ".provx": "application/provenance+xml", + ".cw": "application/prs.cww", + ".cww": "application/prs.cww", + ".hpub": "application/prs.hpub+zip", + ".rnd": "application/prs.nprend", + ".rct": "application/prs.nprend", + ".rdf-crypt": "application/prs.rdf-xml-crypt", + ".xsf": "application/prs.xsf+xml", + ".pskcxml": "application/pskc+xml", + ".rdf": "application/rdf+xml", + ".rif": "application/reginfo+xml", + ".rnc": "application/relax-ng-compact-syntax", + ".rld": "application/resource-lists-diff+xml", + ".rl": "application/resource-lists+xml", + ".rfcxml": "application/rfc+xml", + ".rs": "application/rls-services+xml", + ".gbr": "application/rpki-ghostbusters", + ".mft": "application/rpki-manifest", + ".roa": "application/rpki-roa", + ".rtf": "application/rtf", + ".scim": "application/scim+json", + ".scq": "application/scvp-cv-request", + ".scs": "application/scvp-cv-response", + ".spq": "application/scvp-vp-request", + ".spp": 
"application/scvp-vp-response", + ".sdp": "application/sdp", + ".soc": "application/sgml-open-catalog", + ".shf": "application/shf+xml", + ".siv": "application/sieve", + ".sieve": "application/sieve", + ".cl": "application/simple-filter+xml", + ".smil": "application/smil+xml", + ".smi": "application/smil+xml", + ".sml": "application/smil+xml", + ".rq": "application/sparql-query", + ".srx": "application/sparql-results+xml", + ".sql": "application/sql", + ".gram": "application/srgs", + ".grxml": "application/srgs+xml", + ".sru": "application/sru+xml", + ".ssml": "application/ssml+xml", + ".tau": "application/tamp-apex-update", + ".auc": "application/tamp-apex-update-confirm", + ".tcu": "application/tamp-community-update", + ".cuc": "application/tamp-community-update-confirm", + ".ter": "application/tamp-error", + ".tsa": "application/tamp-sequence-adjust", + ".sac": "application/tamp-sequence-adjust-confirm", + ".tur": "application/tamp-update", + ".tuc": "application/tamp-update-confirm", + ".tei": "application/tei+xml", + ".teiCorpus": "application/tei+xml", + ".odd": "application/tei+xml", + ".tfi": "application/thraud+xml", + ".tsq": "application/timestamp-query", + ".tsr": "application/timestamp-reply", + ".tsd": "application/timestamped-data", + ".trig": "application/trig", + ".ttml": "application/ttml+xml", + ".gsheet": "application/urc-grpsheet+xml", + ".rsheet": "application/urc-ressheet+xml", + ".td": "application/urc-targetdesc+xml", + ".uis": "application/urc-uisocketdesc+xml", + ".plb": "application/vnd.3gpp.pic-bw-large", + ".psb": "application/vnd.3gpp.pic-bw-small", + ".pvb": "application/vnd.3gpp.pic-bw-var", + ".sms": "application/vnd.3gpp2.sms", + ".tcap": "application/vnd.3gpp2.tcap", + ".imgcal": "application/vnd.3lightssoftware.imagescal", + ".pwn": "application/vnd.3M.Post-it-Notes", + ".aso": "application/vnd.accpac.simply.aso", + ".imp": "application/vnd.accpac.simply.imp", + ".acu": "application/vnd.acucobol", + ".atc": "application/vnd.acucorp", + ".acutc": "application/vnd.acucorp", + ".swf": "application/vnd.adobe.flash.movie", + ".fcdt": "application/vnd.adobe.formscentral.fcdt", + ".fxp": "application/vnd.adobe.fxp", + ".fxpl": "application/vnd.adobe.fxp", + ".xdp": "application/vnd.adobe.xdp+xml", + ".xfdf": "application/vnd.adobe.xfdf", + ".ahead": "application/vnd.ahead.space", + ".azf": "application/vnd.airzip.filesecure.azf", + ".azs": "application/vnd.airzip.filesecure.azs", + ".azw3": "application/vnd.amazon.mobi8-ebook", + ".acc": "application/vnd.americandynamics.acc", + ".ami": "application/vnd.amiga.ami", + ".apkg": "application/vnd.anki", + ".cii": "application/vnd.anser-web-certificate-issue-initiation", + ".fti": "application/vnd.anser-web-funds-transfer-initiation", + ".dist": "application/vnd.apple.installer+xml", + ".distz": "application/vnd.apple.installer+xml", + ".pkg": "application/vnd.apple.installer+xml", + ".mpkg": "application/vnd.apple.installer+xml", + ".m3u8": "application/vnd.apple.mpegurl", + ".swi": "application/vnd.aristanetworks.swi", + ".iota": "application/vnd.astraea-software.iota", + ".aep": "application/vnd.audiograph", + ".package": "application/vnd.autopackage", + ".bmml": "application/vnd.balsamiq.bmml+xml", + ".bmpr": "application/vnd.balsamiq.bmpr", + ".mpm": "application/vnd.blueice.multipass", + ".ep": "application/vnd.bluetooth.ep.oob", + ".le": "application/vnd.bluetooth.le.oob", + ".bmi": "application/vnd.bmi", + ".rep": "application/vnd.businessobjects", + ".tlclient": 
"application/vnd.cendio.thinlinc.clientconf", + ".cdxml": "application/vnd.chemdraw+xml", + ".pgn": "application/vnd.chess-pgn", + ".mmd": "application/vnd.chipnuts.karaoke-mmd", + ".cdy": "application/vnd.cinderella", + ".csl": "application/vnd.citationstyles.style+xml", + ".cla": "application/vnd.claymore", + ".rp9": "application/vnd.cloanto.rp9", + ".c4g": "application/vnd.clonk.c4group", + ".c4d": "application/vnd.clonk.c4group", + ".c4f": "application/vnd.clonk.c4group", + ".c4p": "application/vnd.clonk.c4group", + ".c4u": "application/vnd.clonk.c4group", + ".c11amc": "application/vnd.cluetrust.cartomobile-config", + ".c11amz": "application/vnd.cluetrust.cartomobile-config-pkg", + ".coffee": "application/vnd.coffeescript", + ".cbz": "application/vnd.comicbook+zip", + ".ica": "application/vnd.commerce-battelle", + ".icf": "application/vnd.commerce-battelle", + ".icd": "application/vnd.commerce-battelle", + ".ic0": "application/vnd.commerce-battelle", + ".ic1": "application/vnd.commerce-battelle", + ".ic2": "application/vnd.commerce-battelle", + ".ic3": "application/vnd.commerce-battelle", + ".ic4": "application/vnd.commerce-battelle", + ".ic5": "application/vnd.commerce-battelle", + ".ic6": "application/vnd.commerce-battelle", + ".ic7": "application/vnd.commerce-battelle", + ".ic8": "application/vnd.commerce-battelle", + ".csp": "application/vnd.commonspace", + ".cst": "application/vnd.commonspace", + ".cdbcmsg": "application/vnd.contact.cmsg", + ".ign": "application/vnd.coreos.ignition+json", + ".ignition": "application/vnd.coreos.ignition+json", + ".cmc": "application/vnd.cosmocaller", + ".clkx": "application/vnd.crick.clicker", + ".clkk": "application/vnd.crick.clicker.keyboard", + ".clkp": "application/vnd.crick.clicker.palette", + ".clkt": "application/vnd.crick.clicker.template", + ".clkw": "application/vnd.crick.clicker.wordbank", + ".wbs": "application/vnd.criticaltools.wbs+xml", + ".pml": "application/vnd.ctc-posml", + ".ppd": "application/vnd.cups-ppd", + ".curl": "application/vnd.curl", + ".dart": "application/vnd.dart", + ".rdz": "application/vnd.data-vision.rdz", + ".deb": "application/vnd.debian.binary-package", + ".udeb": "application/vnd.debian.binary-package", + ".uvf": "application/vnd.dece.data", + ".uvvf": "application/vnd.dece.data", + ".uvd": "application/vnd.dece.data", + ".uvvd": "application/vnd.dece.data", + ".uvt": "application/vnd.dece.ttml+xml", + ".uvvt": "application/vnd.dece.ttml+xml", + ".uvx": "application/vnd.dece.unspecified", + ".uvvx": "application/vnd.dece.unspecified", + ".uvz": "application/vnd.dece.zip", + ".uvvz": "application/vnd.dece.zip", + ".fe_launch": "application/vnd.denovo.fcselayout-link", + ".dsm": "application/vnd.desmume.movie", + ".dna": "application/vnd.dna", + ".docjson": "application/vnd.document+json", + ".scld": "application/vnd.doremir.scorecloud-binary-document", + ".dpg": "application/vnd.dpgraph", + ".mwc": "application/vnd.dpgraph", + ".dpgraph": "application/vnd.dpgraph", + ".dfac": "application/vnd.dreamfactory", + ".fla": "application/vnd.dtg.local.flash", + ".ait": "application/vnd.dvb.ait", + ".svc": "application/vnd.dvb.service", + ".geo": "application/vnd.dynageo", + ".dzr": "application/vnd.dzr", + ".mag": "application/vnd.ecowin.chart", + ".nml": "application/vnd.enliven", + ".esf": "application/vnd.epson.esf", + ".msf": "application/vnd.epson.msf", + ".qam": "application/vnd.epson.quickanime", + ".slt": "application/vnd.epson.salt", + ".ssf": "application/vnd.epson.ssf", + ".qcall": 
"application/vnd.ericsson.quickcall", + ".qca": "application/vnd.ericsson.quickcall", + ".espass": "application/vnd.espass-espass+zip", + ".es3": "application/vnd.eszigno3+xml", + ".et3": "application/vnd.eszigno3+xml", + ".asice": "application/vnd.etsi.asic-e+zip", + ".sce": "application/vnd.etsi.asic-e+zip", + ".asics": "application/vnd.etsi.asic-s+zip", + ".tst": "application/vnd.etsi.timestamp-token", + ".ez2": "application/vnd.ezpix-album", + ".ez3": "application/vnd.ezpix-package", + ".dim": "application/vnd.fastcopy-disk-image", + ".fdf": "application/vnd.fdf", + ".msd": "application/vnd.fdsn.mseed", + ".mseed": "application/vnd.fdsn.mseed", + ".seed": "application/vnd.fdsn.seed", + ".dataless": "application/vnd.fdsn.seed", + ".zfc": "application/vnd.filmit.zfc", + ".gph": "application/vnd.FloGraphIt", + ".ftc": "application/vnd.fluxtime.clip", + ".sfd": "application/vnd.font-fontforge-sfd", + ".fm": "application/vnd.framemaker", + ".fnc": "application/vnd.frogans.fnc", + ".ltf": "application/vnd.frogans.ltf", + ".fsc": "application/vnd.fsc.weblaunch", + ".oas": "application/vnd.fujitsu.oasys", + ".oa2": "application/vnd.fujitsu.oasys2", + ".oa3": "application/vnd.fujitsu.oasys3", + ".fg5": "application/vnd.fujitsu.oasysgp", + ".bh2": "application/vnd.fujitsu.oasysprs", + ".ddd": "application/vnd.fujixerox.ddd", + ".xdw": "application/vnd.fujixerox.docuworks", + ".xbd": "application/vnd.fujixerox.docuworks.binder", + ".xct": "application/vnd.fujixerox.docuworks.container", + ".fzs": "application/vnd.fuzzysheet", + ".txd": "application/vnd.genomatix.tuxedo", + ".g3": "application/vnd.geocube+xml", + ".g³": "application/vnd.geocube+xml", + ".ggb": "application/vnd.geogebra.file", + ".ggt": "application/vnd.geogebra.tool", + ".gex": "application/vnd.geometry-explorer", + ".gre": "application/vnd.geometry-explorer", + ".gxt": "application/vnd.geonext", + ".g2w": "application/vnd.geoplan", + ".g3w": "application/vnd.geospace", + ".gmx": "application/vnd.gmx", + ".kml": "application/vnd.google-earth.kml+xml", + ".kmz": "application/vnd.google-earth.kmz", + ".gqf": "application/vnd.grafeq", + ".gqs": "application/vnd.grafeq", + ".gac": "application/vnd.groove-account", + ".ghf": "application/vnd.groove-help", + ".gim": "application/vnd.groove-identity-message", + ".grv": "application/vnd.groove-injector", + ".gtm": "application/vnd.groove-tool-message", + ".tpl": "application/vnd.groove-tool-template", + ".vcg": "application/vnd.groove-vcard", + ".hal": "application/vnd.hal+xml", + ".zmm": "application/vnd.HandHeld-Entertainment+xml", + ".hbci": "application/vnd.hbci", + ".hbc": "application/vnd.hbci", + ".kom": "application/vnd.hbci", + ".upa": "application/vnd.hbci", + ".pkd": "application/vnd.hbci", + ".bpd": "application/vnd.hbci", + ".hdt": "application/vnd.hdt", + ".les": "application/vnd.hhe.lesson-player", + ".hpgl": "application/vnd.hp-HPGL", + ".hpi": "application/vnd.hp-hpid", + ".hpid": "application/vnd.hp-hpid", + ".hps": "application/vnd.hp-hps", + ".jlt": "application/vnd.hp-jlyt", + ".pcl": "application/vnd.hp-PCL", + ".sfd-hdstx": "application/vnd.hydrostatix.sof-data", + ".x3d": "application/vnd.hzn-3d-crossword", + ".emm": "application/vnd.ibm.electronic-media", + ".mpy": "application/vnd.ibm.MiniPay", + ".list3820": "application/vnd.ibm.modcap", + ".listafp": "application/vnd.ibm.modcap", + ".afp": "application/vnd.ibm.modcap", + ".pseg3820": "application/vnd.ibm.modcap", + ".irm": "application/vnd.ibm.rights-management", + ".sc": "application/vnd.ibm.secure-container", 
+ ".icc": "application/vnd.iccprofile", + ".icm": "application/vnd.iccprofile", + ".1905.1": "application/vnd.ieee.1905", + ".igl": "application/vnd.igloader", + ".imf": "application/vnd.imagemeter.folder+zip", + ".imi": "application/vnd.imagemeter.image+zip", + ".ivp": "application/vnd.immervision-ivp", + ".ivu": "application/vnd.immervision-ivu", + ".imscc": "application/vnd.ims.imsccv1p1", + ".igm": "application/vnd.insors.igm", + ".xpw": "application/vnd.intercon.formnet", + ".xpx": "application/vnd.intercon.formnet", + ".i2g": "application/vnd.intergeo", + ".qbo": "application/vnd.intu.qbo", + ".qfx": "application/vnd.intu.qfx", + ".rcprofile": "application/vnd.ipunplugged.rcprofile", + ".irp": "application/vnd.irepository.package+xml", + ".xpr": "application/vnd.is-xpr", + ".fcs": "application/vnd.isac.fcs", + ".jam": "application/vnd.jam", + ".rms": "application/vnd.jcp.javame.midlet-rms", + ".jisp": "application/vnd.jisp", + ".joda": "application/vnd.joost.joda-archive", + ".ktz": "application/vnd.kahootz", + ".ktr": "application/vnd.kahootz", + ".karbon": "application/vnd.kde.karbon", + ".chrt": "application/vnd.kde.kchart", + ".kfo": "application/vnd.kde.kformula", + ".flw": "application/vnd.kde.kivio", + ".kon": "application/vnd.kde.kontour", + ".kpr": "application/vnd.kde.kpresenter", + ".kpt": "application/vnd.kde.kpresenter", + ".ksp": "application/vnd.kde.kspread", + ".kwd": "application/vnd.kde.kword", + ".kwt": "application/vnd.kde.kword", + ".htke": "application/vnd.kenameaapp", + ".kia": "application/vnd.kidspiration", + ".kne": "application/vnd.Kinar", + ".knp": "application/vnd.Kinar", + ".sdf": "application/vnd.Kinar", + ".skp": "application/vnd.koan", + ".skd": "application/vnd.koan", + ".skm": "application/vnd.koan", + ".skt": "application/vnd.koan", + ".sse": "application/vnd.kodak-descriptor", + ".lasjson": "application/vnd.las.las+json", + ".lasxml": "application/vnd.las.las+xml", + ".lbd": "application/vnd.llamagraphics.life-balance.desktop", + ".lbe": "application/vnd.llamagraphics.life-balance.exchange+xml", + ".123": "application/vnd.lotus-1-2-3", + ".wk4": "application/vnd.lotus-1-2-3", + ".wk3": "application/vnd.lotus-1-2-3", + ".wk1": "application/vnd.lotus-1-2-3", + ".apr": "application/vnd.lotus-approach", + ".vew": "application/vnd.lotus-approach", + ".prz": "application/vnd.lotus-freelance", + ".pre": "application/vnd.lotus-freelance", + ".nsf": "application/vnd.lotus-notes", + ".ntf": "application/vnd.lotus-notes", + ".ndl": "application/vnd.lotus-notes", + ".ns4": "application/vnd.lotus-notes", + ".ns3": "application/vnd.lotus-notes", + ".ns2": "application/vnd.lotus-notes", + ".nsh": "application/vnd.lotus-notes", + ".nsg": "application/vnd.lotus-notes", + ".or3": "application/vnd.lotus-organizer", + ".or2": "application/vnd.lotus-organizer", + ".org": "application/vnd.lotus-organizer", + ".scm": "application/vnd.lotus-screencam", + ".lwp": "application/vnd.lotus-wordpro", + ".sam": "application/vnd.lotus-wordpro", + ".portpkg": "application/vnd.macports.portpkg", + ".mvt": "application/vnd.mapbox-vector-tile", + ".mdc": "application/vnd.marlin.drm.mdcf", + ".mmdb": "application/vnd.maxmind.maxmind-db", + ".mcd": "application/vnd.mcd", + ".mc1": "application/vnd.medcalcdata", + ".cdkey": "application/vnd.mediastation.cdkey", + ".mwf": "application/vnd.MFER", + ".mfm": "application/vnd.mfmp", + ".flo": "application/vnd.micrografx.flo", + ".igx": "application/vnd.micrografx.igx", + ".mif": "application/vnd.mif", + ".daf": "application/vnd.Mobius.DAF", 
+ ".dis": "application/vnd.Mobius.DIS", + ".mbk": "application/vnd.Mobius.MBK", + ".mqy": "application/vnd.Mobius.MQY", + ".msl": "application/vnd.Mobius.MSL", + ".plc": "application/vnd.Mobius.PLC", + ".txf": "application/vnd.Mobius.TXF", + ".mpn": "application/vnd.mophun.application", + ".mpc": "application/vnd.mophun.certificate", + ".xul": "application/vnd.mozilla.xul+xml", + ".3mf": "application/vnd.ms-3mfdocument", + ".cil": "application/vnd.ms-artgalry", + ".asf": "application/vnd.ms-asf", + ".cab": "application/vnd.ms-cab-compressed", + ".xls": "application/vnd.ms-excel", + ".xlm": "application/vnd.ms-excel", + ".xla": "application/vnd.ms-excel", + ".xlc": "application/vnd.ms-excel", + ".xlt": "application/vnd.ms-excel", + ".xlw": "application/vnd.ms-excel", + ".xltm": "application/vnd.ms-excel.template.macroEnabled.12", + ".xlam": "application/vnd.ms-excel.addin.macroEnabled.12", + ".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12", + ".xlsm": "application/vnd.ms-excel.sheet.macroEnabled.12", + ".eot": "application/vnd.ms-fontobject", + ".chm": "application/vnd.ms-htmlhelp", + ".ims": "application/vnd.ms-ims", + ".lrm": "application/vnd.ms-lrm", + ".thmx": "application/vnd.ms-officetheme", + ".ppt": "application/vnd.ms-powerpoint", + ".pps": "application/vnd.ms-powerpoint", + ".pot": "application/vnd.ms-powerpoint", + ".ppam": "application/vnd.ms-powerpoint.addin.macroEnabled.12", + ".pptm": "application/vnd.ms-powerpoint.presentation.macroEnabled.12", + ".sldm": "application/vnd.ms-powerpoint.slide.macroEnabled.12", + ".ppsm": "application/vnd.ms-powerpoint.slideshow.macroEnabled.12", + ".potm": "application/vnd.ms-powerpoint.template.macroEnabled.12", + ".mpp": "application/vnd.ms-project", + ".mpt": "application/vnd.ms-project", + ".tnef": "application/vnd.ms-tnef", + ".tnf": "application/vnd.ms-tnef", + ".docm": "application/vnd.ms-word.document.macroEnabled.12", + ".dotm": "application/vnd.ms-word.template.macroEnabled.12", + ".wcm": "application/vnd.ms-works", + ".wdb": "application/vnd.ms-works", + ".wks": "application/vnd.ms-works", + ".wps": "application/vnd.ms-works", + ".wpl": "application/vnd.ms-wpl", + ".xps": "application/vnd.ms-xpsdocument", + ".msa": "application/vnd.msa-disk-image", + ".mseq": "application/vnd.mseq", + ".crtr": "application/vnd.multiad.creator", + ".cif": "application/vnd.multiad.creator.cif", + ".mus": "application/vnd.musician", + ".msty": "application/vnd.muvee.style", + ".taglet": "application/vnd.mynfc", + ".entity": "application/vnd.nervana", + ".request": "application/vnd.nervana", + ".bkm": "application/vnd.nervana", + ".kcm": "application/vnd.nervana", + ".nitf": "application/vnd.nitf", + ".nlu": "application/vnd.neurolanguage.nlu", + ".nds": "application/vnd.nintendo.nitro.rom", + ".sfc": "application/vnd.nintendo.snes.rom", + ".smc": "application/vnd.nintendo.snes.rom", + ".nnd": "application/vnd.noblenet-directory", + ".nns": "application/vnd.noblenet-sealer", + ".nnw": "application/vnd.noblenet-web", + ".ac": "application/vnd.nokia.n-gage.ac+xml", + ".ngdat": "application/vnd.nokia.n-gage.data", + ".n-gage": "application/vnd.nokia.n-gage.symbian.install", + ".rpst": "application/vnd.nokia.radio-preset", + ".rpss": "application/vnd.nokia.radio-presets", + ".edm": "application/vnd.novadigm.EDM", + ".edx": "application/vnd.novadigm.EDX", + ".ext": "application/vnd.novadigm.EXT", + ".odc": "application/vnd.oasis.opendocument.chart", + ".otc": "application/vnd.oasis.opendocument.chart-template", + ".odb": 
"application/vnd.oasis.opendocument.database", + ".odf": "application/vnd.oasis.opendocument.formula", + ".odg": "application/vnd.oasis.opendocument.graphics", + ".otg": "application/vnd.oasis.opendocument.graphics-template", + ".odi": "application/vnd.oasis.opendocument.image", + ".oti": "application/vnd.oasis.opendocument.image-template", + ".odp": "application/vnd.oasis.opendocument.presentation", + ".otp": "application/vnd.oasis.opendocument.presentation-template", + ".ods": "application/vnd.oasis.opendocument.spreadsheet", + ".ots": "application/vnd.oasis.opendocument.spreadsheet-template", + ".odt": "application/vnd.oasis.opendocument.text", + ".odm": "application/vnd.oasis.opendocument.text-master", + ".ott": "application/vnd.oasis.opendocument.text-template", + ".oth": "application/vnd.oasis.opendocument.text-web", + ".xo": "application/vnd.olpc-sugar", + ".dd2": "application/vnd.oma.dd2+xml", + ".tam": "application/vnd.onepager", + ".tamp": "application/vnd.onepagertamp", + ".tamx": "application/vnd.onepagertamx", + ".tat": "application/vnd.onepagertat", + ".tatp": "application/vnd.onepagertatp", + ".tatx": "application/vnd.onepagertatx", + ".obgx": "application/vnd.openblox.game+xml", + ".obg": "application/vnd.openblox.game-binary", + ".oeb": "application/vnd.openeye.oeb", + ".oxt": "application/vnd.openofficeorg.extension", + ".osm": "application/vnd.openstreetmap.data+xml", + ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + ".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide", + ".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow", + ".potx": "application/vnd.openxmlformats-officedocument.presentationml.template", + ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + ".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template", + ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + ".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template", + ".ndc": "application/vnd.osa.netdeploy", + ".mgp": "application/vnd.osgeo.mapguide.package", + ".dp": "application/vnd.osgi.dp", + ".esa": "application/vnd.osgi.subsystem", + ".oxlicg": "application/vnd.oxli.countgraph", + ".prc": "application/vnd.palm", + ".pdb": "application/vnd.palm", + ".pqa": "application/vnd.palm", + ".oprc": "application/vnd.palm", + ".plp": "application/vnd.panoply", + ".paw": "application/vnd.pawaafile", + ".str": "application/vnd.pg.format", + ".ei6": "application/vnd.pg.osasli", + ".pil": "application/vnd.piaccess.application-license", + ".efif": "application/vnd.picsel", + ".wg": "application/vnd.pmi.widget", + ".plf": "application/vnd.pocketlearn", + ".pbd": "application/vnd.powerbuilder6", + ".preminet": "application/vnd.preminet", + ".box": "application/vnd.previewsystems.box", + ".vbox": "application/vnd.previewsystems.box", + ".mgz": "application/vnd.proteus.magazine", + ".qps": "application/vnd.publishare-delta-tree", + ".ptid": "application/vnd.pvi.ptid1", + ".bar": "application/vnd.qualcomm.brew-app-res", + ".qxd": "application/vnd.Quark.QuarkXPress", + ".qxt": "application/vnd.Quark.QuarkXPress", + ".qwd": "application/vnd.Quark.QuarkXPress", + ".qwt": "application/vnd.Quark.QuarkXPress", + ".qxl": "application/vnd.Quark.QuarkXPress", + ".qxb": "application/vnd.Quark.QuarkXPress", + ".quox": "application/vnd.quobject-quoxdocument", + ".quiz": "application/vnd.quobject-quoxdocument", + ".tree": 
"application/vnd.rainstor.data", + ".rar": "application/vnd.rar", + ".bed": "application/vnd.realvnc.bed", + ".mxl": "application/vnd.recordare.musicxml", + ".cryptonote": "application/vnd.rig.cryptonote", + ".link66": "application/vnd.route66.link66+xml", + ".st": "application/vnd.sailingtracker.track", + ".scd": "application/vnd.scribus", + ".sla": "application/vnd.scribus", + ".slaz": "application/vnd.scribus", + ".s3df": "application/vnd.sealed.3df", + ".scsf": "application/vnd.sealed.csf", + ".sdoc": "application/vnd.sealed.doc", + ".sdo": "application/vnd.sealed.doc", + ".s1w": "application/vnd.sealed.doc", + ".seml": "application/vnd.sealed.eml", + ".sem": "application/vnd.sealed.eml", + ".smht": "application/vnd.sealed.mht", + ".smh": "application/vnd.sealed.mht", + ".sppt": "application/vnd.sealed.ppt", + ".s1p": "application/vnd.sealed.ppt", + ".stif": "application/vnd.sealed.tiff", + ".sxls": "application/vnd.sealed.xls", + ".sxl": "application/vnd.sealed.xls", + ".s1e": "application/vnd.sealed.xls", + ".stml": "application/vnd.sealedmedia.softseal.html", + ".s1h": "application/vnd.sealedmedia.softseal.html", + ".spdf": "application/vnd.sealedmedia.softseal.pdf", + ".spd": "application/vnd.sealedmedia.softseal.pdf", + ".s1a": "application/vnd.sealedmedia.softseal.pdf", + ".see": "application/vnd.seemail", + ".sema": "application/vnd.sema", + ".semd": "application/vnd.semd", + ".semf": "application/vnd.semf", + ".ifm": "application/vnd.shana.informed.formdata", + ".itp": "application/vnd.shana.informed.formtemplate", + ".iif": "application/vnd.shana.informed.interchange", + ".ipk": "application/vnd.shana.informed.package", + ".twd": "application/vnd.SimTech-MindMapper", + ".twds": "application/vnd.SimTech-MindMapper", + ".mmf": "application/vnd.smaf", + ".notebook": "application/vnd.smart.notebook", + ".teacher": "application/vnd.smart.teacher", + ".fo": "application/vnd.software602.filler.form+xml", + ".zfo": "application/vnd.software602.filler.form-xml-zip", + ".sdkm": "application/vnd.solent.sdkm+xml", + ".sdkd": "application/vnd.solent.sdkm+xml", + ".dxp": "application/vnd.spotfire.dxp", + ".sfs": "application/vnd.spotfire.sfs", + ".smzip": "application/vnd.stepmania.package", + ".sm": "application/vnd.stepmania.stepchart", + ".wadl": "application/vnd.sun.wadl+xml", + ".sus": "application/vnd.sus-calendar", + ".susp": "application/vnd.sus-calendar", + ".xsm": "application/vnd.syncml+xml", + ".bdm": "application/vnd.syncml.dm+wbxml", + ".xdm": "application/vnd.syncml.dm+xml", + ".ddf": "application/vnd.syncml.dmddf+xml", + ".tao": "application/vnd.tao.intent-module-archive", + ".pcap": "application/vnd.tcpdump.pcap", + ".cap": "application/vnd.tcpdump.pcap", + ".dmp": "application/vnd.tcpdump.pcap", + ".qvd": "application/vnd.theqvd", + ".vfr": "application/vnd.tml", + ".viaframe": "application/vnd.tml", + ".tmo": "application/vnd.tmobile-livetv", + ".tpt": "application/vnd.trid.tpt", + ".mxs": "application/vnd.triscape.mxs", + ".tra": "application/vnd.trueapp", + ".ufdl": "application/vnd.ufdl", + ".ufd": "application/vnd.ufdl", + ".frm": "application/vnd.ufdl", + ".utz": "application/vnd.uiq.theme", + ".umj": "application/vnd.umajin", + ".unityweb": "application/vnd.unity", + ".uoml": "application/vnd.uoml+xml", + ".uo": "application/vnd.uoml+xml", + ".urim": "application/vnd.uri-map", + ".urimap": "application/vnd.uri-map", + ".vmt": "application/vnd.valve.source.material", + ".vcx": "application/vnd.vcx", + ".mxi": "application/vnd.vd-study", + ".study-inter": 
"application/vnd.vd-study", + ".model-inter": "application/vnd.vd-study", + ".vwx": "application/vnd.vectorworks", + ".vsc": "application/vnd.vidsoft.vidconference", + ".vsd": "application/vnd.visio", + ".vst": "application/vnd.visio", + ".vsw": "application/vnd.visio", + ".vss": "application/vnd.visio", + ".vis": "application/vnd.visionary", + ".vsf": "application/vnd.vsf", + ".sic": "application/vnd.wap.sic", + ".slc": "application/vnd.wap.slc", + ".wbxml": "application/vnd.wap.wbxml", + ".wmlc": "application/vnd.wap.wmlc", + ".wmlsc": "application/vnd.wap.wmlscriptc", + ".wtb": "application/vnd.webturbo", + ".p2p": "application/vnd.wfa.p2p", + ".wsc": "application/vnd.wfa.wsc", + ".wmc": "application/vnd.wmc", + ".m": "application/vnd.wolfram.mathematica.package", + ".nbp": "application/vnd.wolfram.player", + ".wpd": "application/vnd.wordperfect", + ".wqd": "application/vnd.wqd", + ".stf": "application/vnd.wt.stf", + ".wv": "application/vnd.wv.csp+wbxml", + ".xar": "application/vnd.xara", + ".xfdl": "application/vnd.xfdl", + ".xfd": "application/vnd.xfdl", + ".cpkg": "application/vnd.xmpie.cpkg", + ".dpkg": "application/vnd.xmpie.dpkg", + ".ppkg": "application/vnd.xmpie.ppkg", + ".xlim": "application/vnd.xmpie.xlim", + ".hvd": "application/vnd.yamaha.hv-dic", + ".hvs": "application/vnd.yamaha.hv-script", + ".hvp": "application/vnd.yamaha.hv-voice", + ".osf": "application/vnd.yamaha.openscoreformat", + ".saf": "application/vnd.yamaha.smaf-audio", + ".spf": "application/vnd.yamaha.smaf-phrase", + ".yme": "application/vnd.yaoweme", + ".cmp": "application/vnd.yellowriver-custom-menu", + ".zir": "application/vnd.zul", + ".zirz": "application/vnd.zul", + ".zaz": "application/vnd.zzazz.deck+xml", + ".vxml": "application/voicexml+xml", + ".wif": "application/watcherinfo+xml", + ".wgt": "application/widget", + ".wsdl": "application/wsdl+xml", + ".wspolicy": "application/wspolicy+xml", + ".xav": "application/xcap-att+xml", + ".xca": "application/xcap-caps+xml", + ".xdf": "application/xcap-diff+xml", + ".xel": "application/xcap-el+xml", + ".xer": "application/xcap-error+xml", + ".xns": "application/xcap-ns+xml", + ".xhtml": "application/xhtml+xml", + ".xhtm": "application/xhtml+xml", + ".xht": "application/xhtml+xml", + ".dtd": "application/xml-dtd", + ".xop": "application/xop+xml", + ".xsl": "application/xslt+xml", + ".xslt": "application/xslt+xml", + ".mxml": "application/xv+xml", + ".xhvml": "application/xv+xml", + ".xvml": "application/xv+xml", + ".xvm": "application/xv+xml", + ".yang": "application/yang", + ".yin": "application/yin+xml", + ".zip": "application/zip", + ".726": "audio/32kadpcm", + ".ac3": "audio/ac3", + ".amr": "audio/AMR", + ".awb": "audio/AMR-WB", + ".acn": "audio/asc", + ".aal": "audio/ATRAC-ADVANCED-LOSSLESS", + ".atx": "audio/ATRAC-X", + ".at3": "audio/ATRAC3", + ".aa3": "audio/ATRAC3", + ".omg": "audio/ATRAC3", + ".au": "audio/basic", + ".snd": "audio/basic", + ".dls": "audio/dls", + ".evc": "audio/EVRC", + ".evb": "audio/EVRCB", + ".enw": "audio/EVRCNW", + ".evw": "audio/EVRCWB", + ".lbc": "audio/iLBC", + ".l16": "audio/L16", + ".mxmf": "audio/mobile-xmf", + ".m4a": "audio/mp4", + ".mp3": "audio/mpeg", + ".mpga": "audio/mpeg", + ".mp1": "audio/mpeg", + ".mp2": "audio/mpeg", + ".oga": "audio/ogg", + ".ogg": "audio/ogg", + ".opus": "audio/ogg", + ".spx": "audio/ogg", + ".sid": "audio/prs.sid", + ".psid": "audio/prs.sid", + ".qcp": "audio/qcelp", + ".smv": "audio/SMV", + ".koz": "audio/vnd.audikoz", + ".uva": "audio/vnd.dece.audio", + ".uvva": "audio/vnd.dece.audio", + 
".eol": "audio/vnd.digital-winds", + ".mlp": "audio/vnd.dolby.mlp", + ".dts": "audio/vnd.dts", + ".dtshd": "audio/vnd.dts.hd", + ".plj": "audio/vnd.everad.plj", + ".lvp": "audio/vnd.lucent.voice", + ".pya": "audio/vnd.ms-playready.media.pya", + ".vbk": "audio/vnd.nortel.vbk", + ".ecelp4800": "audio/vnd.nuera.ecelp4800", + ".ecelp7470": "audio/vnd.nuera.ecelp7470", + ".ecelp9600": "audio/vnd.nuera.ecelp9600", + ".rip": "audio/vnd.rip", + ".smp3": "audio/vnd.sealedmedia.softseal.mpeg", + ".smp": "audio/vnd.sealedmedia.softseal.mpeg", + ".s1m": "audio/vnd.sealedmedia.softseal.mpeg", + ".ttc": "font/collection", + ".otf": "font/otf", + ".ttf": "font/ttf", + ".woff": "font/woff", + ".woff2": "font/woff2", + ".bmp": "image/bmp", + ".dib": "image/bmp", + ".cgm": "image/cgm", + ".drle": "image/dicom-rle", + ".emf": "image/emf", + ".fits": "image/fits", + ".fit": "image/fits", + ".fts": "image/fits", + ".gif": "image/gif", + ".ief": "image/ief", + ".jls": "image/jls", + ".jp2": "image/jp2", + ".jpg2": "image/jp2", + ".jpg": "image/jpeg", + ".jpeg": "image/jpeg", + ".jpe": "image/jpeg", + ".jfif": "image/jpeg", + ".jpm": "image/jpm", + ".jpgm": "image/jpm", + ".jpx": "image/jpx", + ".jpf": "image/jpx", + ".ktx": "image/ktx", + ".png": "image/png", + ".btif": "image/prs.btif", + ".btf": "image/prs.btif", + ".pti": "image/prs.pti", + ".svg": "image/svg+xml", + ".svgz": "image/svg+xml", + ".t38": "image/t38", + ".tiff": "image/tiff", + ".tif": "image/tiff", + ".tfx": "image/tiff-fx", + ".psd": "image/vnd.adobe.photoshop", + ".azv": "image/vnd.airzip.accelerator.azv", + ".uvi": "image/vnd.dece.graphic", + ".uvvi": "image/vnd.dece.graphic", + ".uvg": "image/vnd.dece.graphic", + ".uvvg": "image/vnd.dece.graphic", + ".djvu": "image/vnd.djvu", + ".djv": "image/vnd.djvu", + ".dwg": "image/vnd.dwg", + ".dxf": "image/vnd.dxf", + ".fbs": "image/vnd.fastbidsheet", + ".fpx": "image/vnd.fpx", + ".fst": "image/vnd.fst", + ".mmr": "image/vnd.fujixerox.edmics-mmr", + ".rlc": "image/vnd.fujixerox.edmics-rlc", + ".pgb": "image/vnd.globalgraphics.pgb", + ".ico": "image/vnd.microsoft.icon", + ".apng": "image/vnd.mozilla.apng", + ".mdi": "image/vnd.ms-modi", + ".hdr": "image/vnd.radiance", + ".rgbe": "image/vnd.radiance", + ".xyze": "image/vnd.radiance", + ".spng": "image/vnd.sealed.png", + ".spn": "image/vnd.sealed.png", + ".s1n": "image/vnd.sealed.png", + ".sgif": "image/vnd.sealedmedia.softseal.gif", + ".sgi": "image/vnd.sealedmedia.softseal.gif", + ".s1g": "image/vnd.sealedmedia.softseal.gif", + ".sjpg": "image/vnd.sealedmedia.softseal.jpg", + ".sjp": "image/vnd.sealedmedia.softseal.jpg", + ".s1j": "image/vnd.sealedmedia.softseal.jpg", + ".tap": "image/vnd.tencent.tap", + ".vtf": "image/vnd.valve.source.texture", + ".wbmp": "image/vnd.wap.wbmp", + ".xif": "image/vnd.xiff", + ".pcx": "image/vnd.zbrush.pcx", + ".wmf": "image/wmf", + ".u8msg": "message/global", + ".u8dsn": "message/global-delivery-status", + ".u8mdn": "message/global-disposition-notification", + ".u8hdr": "message/global-headers", + ".eml": "message/rfc822", + ".mail": "message/rfc822", + ".art": "message/rfc822", + ".gltf": "model/gltf+json", + ".igs": "model/iges", + ".iges": "model/iges", + ".msh": "model/mesh", + ".mesh": "model/mesh", + ".silo": "model/mesh", + ".dae": "model/vnd.collada+xml", + ".dwf": "model/vnd.dwf", + ".gdl": "model/vnd.gdl", + ".gsm": "model/vnd.gdl", + ".win": "model/vnd.gdl", + ".dor": "model/vnd.gdl", + ".lmp": "model/vnd.gdl", + ".rsm": "model/vnd.gdl", + ".msm": "model/vnd.gdl", + ".ism": "model/vnd.gdl", + ".gtw": 
"model/vnd.gtw", + ".moml": "model/vnd.moml+xml", + ".mts": "model/vnd.mts", + ".ogex": "model/vnd.opengex", + ".x_b": "model/vnd.parasolid.transmit.binary", + ".xmt_bin": "model/vnd.parasolid.transmit.binary", + ".x_t": "model/vnd.parasolid.transmit.text", + ".xmt_txt": "model/vnd.parasolid.transmit.text", + ".bsp": "model/vnd.valve.source.compiled-map", + ".vtu": "model/vnd.vtu", + ".wrl": "model/vrml", + ".vrml": "model/vrml", + ".x3db": "model/x3d+xml", + ".x3dv": "model/x3d-vrml", + ".x3dvz": "model/x3d-vrml", + ".bmed": "multipart/vnd.bint.med-plus", + ".vpm": "multipart/voice-message", + ".appcache": "text/cache-manifest", + ".manifest": "text/cache-manifest", + ".ics": "text/calendar", + ".ifb": "text/calendar", + ".css": "text/css", + ".csv": "text/csv", + ".csvs": "text/csv-schema", + ".soa": "text/dns", + ".zone": "text/dns", + ".html": "text/html", + ".htm": "text/html", + ".cnd": "text/jcr-cnd", + ".markdown": "text/markdown", + ".md": "text/markdown", + ".miz": "text/mizar", + ".n3": "text/n3", + ".txt": "text/plain", + ".asc": "text/plain", + ".text": "text/plain", + ".pm": "text/plain", + ".el": "text/plain", + ".c": "text/plain", + ".h": "text/plain", + ".cc": "text/plain", + ".hh": "text/plain", + ".cxx": "text/plain", + ".hxx": "text/plain", + ".f90": "text/plain", + ".conf": "text/plain", + ".log": "text/plain", + ".provn": "text/provenance-notation", + ".rst": "text/prs.fallenstein.rst", + ".tag": "text/prs.lines.tag", + ".dsc": "text/prs.lines.tag", + ".rtx": "text/richtext", + ".sgml": "text/sgml", + ".sgm": "text/sgml", + ".tsv": "text/tab-separated-values", + ".t": "text/troff", + ".tr": "text/troff", + ".roff": "text/troff", + ".ttl": "text/turtle", + ".uris": "text/uri-list", + ".uri": "text/uri-list", + ".vcf": "text/vcard", + ".vcard": "text/vcard", + ".a": "text/vnd.a", + ".abc": "text/vnd.abc", + ".ascii": "text/vnd.ascii-art", + ".copyright": "text/vnd.debian.copyright", + ".dms": "text/vnd.DMClientScript", + ".sub": "text/vnd.dvb.subtitle", + ".jtd": "text/vnd.esmertec.theme-descriptor", + ".fly": "text/vnd.fly", + ".flx": "text/vnd.fmi.flexstor", + ".gv": "text/vnd.graphviz", + ".dot": "text/vnd.graphviz", + ".3dml": "text/vnd.in3d.3dml", + ".3dm": "text/vnd.in3d.3dml", + ".spot": "text/vnd.in3d.spot", + ".spo": "text/vnd.in3d.spot", + ".mpf": "text/vnd.ms-mediapackage", + ".ccc": "text/vnd.net2phone.commcenter.command", + ".uric": "text/vnd.si.uricatalogue", + ".jad": "text/vnd.sun.j2me.app-descriptor", + ".ts": "text/vnd.trolltech.linguist", + ".si": "text/vnd.wap.si", + ".sl": "text/vnd.wap.sl", + ".wml": "text/vnd.wap.wml", + ".wmls": "text/vnd.wap.wmlscript", + ".xml": "text/xml", + ".xsd": "text/xml", + ".rng": "text/xml", + ".ent": "text/xml-external-parsed-entity", + ".3gp": "video/3gpp", + ".3gpp": "video/3gpp", + ".3g2": "video/3gpp2", + ".3gpp2": "video/3gpp2", + ".m4s": "video/iso.segment", + ".mj2": "video/mj2", + ".mjp2": "video/mj2", + ".mp4": "video/mp4", + ".mpg4": "video/mp4", + ".m4v": "video/mp4", + ".mpeg": "video/mpeg", + ".mpg": "video/mpeg", + ".mpe": "video/mpeg", + ".m1v": "video/mpeg", + ".m2v": "video/mpeg", + ".ogv": "video/ogg", + ".mov": "video/quicktime", + ".qt": "video/quicktime", + ".uvh": "video/vnd.dece.hd", + ".uvvh": "video/vnd.dece.hd", + ".uvm": "video/vnd.dece.mobile", + ".uvvm": "video/vnd.dece.mobile", + ".uvu": "video/vnd.dece.mp4", + ".uvvu": "video/vnd.dece.mp4", + ".uvp": "video/vnd.dece.pd", + ".uvvp": "video/vnd.dece.pd", + ".uvs": "video/vnd.dece.sd", + ".uvvs": "video/vnd.dece.sd", + ".uvv": 
"video/vnd.dece.video", + ".uvvv": "video/vnd.dece.video", + ".dvb": "video/vnd.dvb.file", + ".fvt": "video/vnd.fvt", + ".mxu": "video/vnd.mpegurl", + ".m4u": "video/vnd.mpegurl", + ".pyv": "video/vnd.ms-playready.media.pyv", + ".nim": "video/vnd.nokia.interleaved-multimedia", + ".bik": "video/vnd.radgamettools.bink", + ".bk2": "video/vnd.radgamettools.bink", + ".smk": "video/vnd.radgamettools.smacker", + ".smpg": "video/vnd.sealed.mpeg1", + ".s11": "video/vnd.sealed.mpeg1", + ".s14": "video/vnd.sealed.mpeg4", + ".sswf": "video/vnd.sealed.swf", + ".ssw": "video/vnd.sealed.swf", + ".smov": "video/vnd.sealedmedia.softseal.mov", + ".smo": "video/vnd.sealedmedia.softseal.mov", + ".s1q": "video/vnd.sealedmedia.softseal.mov", + ".viv": "video/vnd.vivo", + ".cpt": "application/mac-compactpro", + ".metalink": "application/metalink+xml", + ".owx": "application/owl+xml", + ".rss": "application/rss+xml", + ".apk": "application/vnd.android.package-archive", + ".dd": "application/vnd.oma.dd+xml", + ".dcf": "application/vnd.oma.drm.content", + ".o4a": "application/vnd.oma.drm.dcf", + ".o4v": "application/vnd.oma.drm.dcf", + ".dm": "application/vnd.oma.drm.message", + ".drc": "application/vnd.oma.drm.rights+wbxml", + ".dr": "application/vnd.oma.drm.rights+xml", + ".sxc": "application/vnd.sun.xml.calc", + ".stc": "application/vnd.sun.xml.calc.template", + ".sxd": "application/vnd.sun.xml.draw", + ".std": "application/vnd.sun.xml.draw.template", + ".sxi": "application/vnd.sun.xml.impress", + ".sti": "application/vnd.sun.xml.impress.template", + ".sxm": "application/vnd.sun.xml.math", + ".sxw": "application/vnd.sun.xml.writer", + ".sxg": "application/vnd.sun.xml.writer.global", + ".stw": "application/vnd.sun.xml.writer.template", + ".sis": "application/vnd.symbian.install", + ".mms": "application/vnd.wap.mms-message", + ".anx": "application/x-annodex", + ".bcpio": "application/x-bcpio", + ".torrent": "application/x-bittorrent", + ".bz2": "application/x-bzip2", + ".vcd": "application/x-cdlink", + ".crx": "application/x-chrome-extension", + ".cpio": "application/x-cpio", + ".csh": "application/x-csh", + ".dcr": "application/x-director", + ".dir": "application/x-director", + ".dxr": "application/x-director", + ".dvi": "application/x-dvi", + ".spl": "application/x-futuresplash", + ".gtar": "application/x-gtar", + ".hdf": "application/x-hdf", + ".jar": "application/x-java-archive", + ".jnlp": "application/x-java-jnlp-file", + ".pack": "application/x-java-pack200", + ".kil": "application/x-killustrator", + ".latex": "application/x-latex", + ".nc": "application/x-netcdf", + ".cdf": "application/x-netcdf", + ".pl": "application/x-perl", + ".rpm": "application/x-rpm", + ".sh": "application/x-sh", + ".shar": "application/x-shar", + ".sit": "application/x-stuffit", + ".sv4cpio": "application/x-sv4cpio", + ".sv4crc": "application/x-sv4crc", + ".tar": "application/x-tar", + ".tcl": "application/x-tcl", + ".tex": "application/x-tex", + ".texinfo": "application/x-texinfo", + ".texi": "application/x-texinfo", + ".man": "application/x-troff-man", + ".1": "application/x-troff-man", + ".2": "application/x-troff-man", + ".3": "application/x-troff-man", + ".4": "application/x-troff-man", + ".5": "application/x-troff-man", + ".6": "application/x-troff-man", + ".7": "application/x-troff-man", + ".8": "application/x-troff-man", + ".me": "application/x-troff-me", + ".ms": "application/x-troff-ms", + ".ustar": "application/x-ustar", + ".src": "application/x-wais-source", + ".xpi": "application/x-xpinstall", + ".xspf": 
"application/x-xspf+xml", + ".xz": "application/x-xz", + ".mid": "audio/midi", + ".midi": "audio/midi", + ".kar": "audio/midi", + ".aif": "audio/x-aiff", + ".aiff": "audio/x-aiff", + ".aifc": "audio/x-aiff", + ".axa": "audio/x-annodex", + ".flac": "audio/x-flac", + ".mka": "audio/x-matroska", + ".mod": "audio/x-mod", + ".ult": "audio/x-mod", + ".uni": "audio/x-mod", + ".m15": "audio/x-mod", + ".mtm": "audio/x-mod", + ".669": "audio/x-mod", + ".med": "audio/x-mod", + ".m3u": "audio/x-mpegurl", + ".wax": "audio/x-ms-wax", + ".wma": "audio/x-ms-wma", + ".ram": "audio/x-pn-realaudio", + ".rm": "audio/x-pn-realaudio", + ".ra": "audio/x-realaudio", + ".s3m": "audio/x-s3m", + ".stm": "audio/x-stm", + ".wav": "audio/x-wav", + ".xyz": "chemical/x-xyz", + ".webp": "image/webp", + ".ras": "image/x-cmu-raster", + ".pnm": "image/x-portable-anymap", + ".pbm": "image/x-portable-bitmap", + ".pgm": "image/x-portable-graymap", + ".ppm": "image/x-portable-pixmap", + ".rgb": "image/x-rgb", + ".tga": "image/x-targa", + ".xbm": "image/x-xbitmap", + ".xpm": "image/x-xpixmap", + ".xwd": "image/x-xwindowdump", + ".sandboxed": "text/html-sandboxed", + ".pod": "text/x-pod", + ".etx": "text/x-setext", + ".webm": "video/webm", + ".axv": "video/x-annodex", + ".flv": "video/x-flv", + ".fxm": "video/x-javafx", + ".mkv": "video/x-matroska", + ".mk3d": "video/x-matroska-3d", + ".asx": "video/x-ms-asf", + ".wm": "video/x-ms-wm", + ".wmv": "video/x-ms-wmv", + ".wmx": "video/x-ms-wmx", + ".wvx": "video/x-ms-wvx", + ".avi": "video/x-msvideo", + ".movie": "video/x-sgi-movie", + ".ice": "x-conference/x-cooltalk", + ".sisx": "x-epoc/x-sisx-app", + } + for ext, name := range mimeTypes { + if err := mime.AddExtensionType(ext, name); err != nil { + panic(err) + } + } +} diff --git a/swarm/api/http/server.go b/swarm/api/http/server.go index af1269b93..5ec69373d 100644 --- a/swarm/api/http/server.go +++ b/swarm/api/http/server.go @@ -201,6 +201,13 @@ func (s *Server) HandleBzzGet(w http.ResponseWriter, r *http.Request) { defer reader.Close() w.Header().Set("Content-Type", "application/x-tar") + + fileName := uri.Addr + if found := path.Base(uri.Path); found != "" && found != "." 
&& found != "/" { + fileName = found + } + w.Header().Set("Content-Disposition", fmt.Sprintf("inline; filename=\"%s.tar\"", fileName)) + w.WriteHeader(http.StatusOK) io.Copy(w, reader) return @@ -487,6 +494,7 @@ func resourcePostMode(path string) (isRaw bool, frequency uint64, err error) { // The requests can be to a) create a resource, b) update a resource or c) both a+b: create a resource and set the initial content func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) { ruid := GetRUID(r.Context()) + uri := GetURI(r.Context()) log.Debug("handle.post.resource", "ruid", ruid) var err error @@ -496,9 +504,24 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) { RespondError(w, r, err.Error(), http.StatusInternalServerError) return } + + view, err := s.api.ResolveResourceView(r.Context(), uri, r.URL.Query()) + if err != nil { // couldn't parse query string or retrieve manifest + getFail.Inc(1) + httpStatus := http.StatusBadRequest + if err == api.ErrCannotLoadResourceManifest || err == api.ErrCannotResolveResourceURI { + httpStatus = http.StatusNotFound + } + RespondError(w, r, fmt.Sprintf("cannot retrieve resource view: %s", err), httpStatus) + return + } + var updateRequest mru.Request - if err := updateRequest.UnmarshalJSON(body); err != nil { // decodes request JSON - RespondError(w, r, err.Error(), http.StatusBadRequest) //TODO: send different status response depending on error + updateRequest.View = *view + query := r.URL.Query() + + if err := updateRequest.FromValues(query, body); err != nil { // decodes request from query parameters + RespondError(w, r, err.Error(), http.StatusBadRequest) return } @@ -510,56 +533,40 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) { RespondError(w, r, err.Error(), http.StatusForbidden) return } - } - - if updateRequest.IsNew() { - err = s.api.ResourceCreate(r.Context(), &updateRequest) - if err != nil { - code, err2 := s.translateResourceError(w, r, "resource creation fail", err) - RespondError(w, r, err2.Error(), code) - return - } - } - - if updateRequest.IsUpdate() { - _, err = s.api.ResourceUpdate(r.Context(), &updateRequest.SignedResourceUpdate) + _, err = s.api.ResourceUpdate(r.Context(), &updateRequest) if err != nil { RespondError(w, r, err.Error(), http.StatusInternalServerError) return } } - // at this point both possible operations (create, update or both) were successful - // so in case it was a new resource, then create a manifest and send it over. 
- - if updateRequest.IsNew() { + if query.Get("manifest") == "1" { // we create a manifest so we can retrieve the resource with bzz:// later - // this manifest has a special "resource type" manifest, and its hash is the key of the mutable resource - // metadata chunk (rootAddr) - m, err := s.api.NewResourceManifest(r.Context(), updateRequest.RootAddr().Hex()) + // this manifest has a special "resource type" manifest, and saves the + // resource view ID used to retrieve the resource later + m, err := s.api.NewResourceManifest(r.Context(), &updateRequest.View) if err != nil { RespondError(w, r, fmt.Sprintf("failed to create resource manifest: %v", err), http.StatusInternalServerError) return } - // the key to the manifest will be passed back to the client - // the client can access the root chunk key directly through its Hash member - // the manifest key should be set as content in the resolver of the ENS name - // \TODO update manifest key automatically in ENS + // the client can access the view directly through its resourceView member + // the manifest key can be set as content in the resolver of the ENS name outdata, err := json.Marshal(m) if err != nil { RespondError(w, r, fmt.Sprintf("failed to create json response: %s", err), http.StatusInternalServerError) return } fmt.Fprint(w, string(outdata)) + + w.Header().Add("Content-type", "application/json") } - w.Header().Add("Content-type", "application/json") } // Retrieve mutable resource updates: // bzz-resource://<id> - get latest update -// bzz-resource://<id>/<n> - get latest update on period n -// bzz-resource://<id>/<n>/<m> - get update version m of period n +// bzz-resource://<id>/?period=n - get latest update on period n +// bzz-resource://<id>/?period=n&version=m - get update version m of period n // bzz-resource://<id>/meta - get metadata and next version information // <id> = ens name or hash // TODO: Enable pass maxPeriod parameter @@ -569,84 +576,44 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *http.Request) { log.Debug("handle.get.resource", "ruid", ruid) var err error - // resolve the content key. 
- manifestAddr := uri.Address() - if manifestAddr == nil { - manifestAddr, err = s.api.Resolve(r.Context(), uri.Addr) - if err != nil { - getFail.Inc(1) - RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) - return - } - } else { - w.Header().Set("Cache-Control", "max-age=2147483648") - } - - // get the root chunk rootAddr from the manifest - rootAddr, err := s.api.ResolveResourceManifest(r.Context(), manifestAddr) - if err != nil { + view, err := s.api.ResolveResourceView(r.Context(), uri, r.URL.Query()) + if err != nil { // couldn't parse query string or retrieve manifest getFail.Inc(1) - RespondError(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", uri.Addr, err), http.StatusNotFound) + httpStatus := http.StatusBadRequest + if err == api.ErrCannotLoadResourceManifest || err == api.ErrCannotResolveResourceURI { + httpStatus = http.StatusNotFound + } + RespondError(w, r, fmt.Sprintf("cannot retrieve resource view: %s", err), httpStatus) return } - log.Debug("handle.get.resource: resolved", "ruid", ruid, "manifestkey", manifestAddr, "rootchunk addr", rootAddr) - // determine if the query specifies period and version or it is a metadata query - var params []string - if len(uri.Path) > 0 { - if uri.Path == "meta" { - unsignedUpdateRequest, err := s.api.ResourceNewRequest(r.Context(), rootAddr) - if err != nil { - getFail.Inc(1) - RespondError(w, r, fmt.Sprintf("cannot retrieve resource metadata for rootAddr=%s: %s", rootAddr.Hex(), err), http.StatusNotFound) - return - } - rawResponse, err := unsignedUpdateRequest.MarshalJSON() - if err != nil { - RespondError(w, r, fmt.Sprintf("cannot encode unsigned UpdateRequest: %v", err), http.StatusInternalServerError) - return - } - w.Header().Add("Content-type", "application/json") - w.WriteHeader(http.StatusOK) - fmt.Fprint(w, string(rawResponse)) - return - - } - - params = strings.Split(uri.Path, "/") - - } - var name string - var data []byte - now := time.Now() - - switch len(params) { - case 0: // latest only - name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupLatest(rootAddr)) - case 2: // specific period and version - var version uint64 - var period uint64 - version, err = strconv.ParseUint(params[1], 10, 32) + if r.URL.Query().Get("meta") == "1" { + unsignedUpdateRequest, err := s.api.ResourceNewRequest(r.Context(), view) if err != nil { - break - } - period, err = strconv.ParseUint(params[0], 10, 32) - if err != nil { - break + getFail.Inc(1) + RespondError(w, r, fmt.Sprintf("cannot retrieve resource metadata for view=%s: %s", view.Hex(), err), http.StatusNotFound) + return } - name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupVersion(rootAddr, uint32(period), uint32(version))) - case 1: // last version of specific period - var period uint64 - period, err = strconv.ParseUint(params[0], 10, 32) + rawResponse, err := unsignedUpdateRequest.MarshalJSON() if err != nil { - break + RespondError(w, r, fmt.Sprintf("cannot encode unsigned UpdateRequest: %v", err), http.StatusInternalServerError) + return } - name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupLatestVersionInPeriod(rootAddr, uint32(period))) - default: // bogus - err = mru.NewError(storage.ErrInvalidValue, "invalid mutable resource request") + w.Header().Add("Content-type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, string(rawResponse)) + return + } + + lookupParams := &mru.Query{View: *view} + if err = lookupParams.FromValues(r.URL.Query()); err != nil { // parse 
period, version + RespondError(w, r, fmt.Sprintf("invalid mutable resource request:%s", err), http.StatusBadRequest) + return } + data, err := s.api.ResourceLookup(r.Context(), lookupParams) + // any error from the switch statement will end up here if err != nil { code, err2 := s.translateResourceError(w, r, "mutable resource lookup fail", err) @@ -655,9 +622,9 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *http.Request) { } // All ok, serve the retrieved update - log.Debug("Found update", "name", name, "ruid", ruid) - w.Header().Set("Content-Type", "application/octet-stream") - http.ServeContent(w, r, "", now, bytes.NewReader(data)) + log.Debug("Found update", "view", view.Hex(), "ruid", ruid) + w.Header().Set("Content-Type", api.MimeOctetStream) + http.ServeContent(w, r, "", time.Now(), bytes.NewReader(data)) } func (s *Server) translateResourceError(w http.ResponseWriter, r *http.Request, supErr string, err error) (int, error) { @@ -730,11 +697,9 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *http.Request) { case uri.Raw(): // allow the request to overwrite the content type using a query // parameter - contentType := "application/octet-stream" if typ := r.URL.Query().Get("content_type"); typ != "" { - contentType = typ + w.Header().Set("Content-Type", typ) } - w.Header().Set("Content-Type", contentType) http.ServeContent(w, r, "", time.Now(), reader) case uri.Hash(): w.Header().Set("Content-Type", "text/plain") @@ -890,8 +855,17 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *http.Request) { return } - w.Header().Set("Content-Type", contentType) - http.ServeContent(w, r, "", time.Now(), newBufferedReadSeeker(reader, getFileBufferSize)) + if contentType != "" { + w.Header().Set("Content-Type", contentType) + } + + fileName := uri.Addr + if found := path.Base(uri.Path); found != "" && found != "." 
&& found != "/" { + fileName = found + } + w.Header().Set("Content-Disposition", fmt.Sprintf("inline; filename=\"%s\"", fileName)) + + http.ServeContent(w, r, fileName, time.Now(), newBufferedReadSeeker(reader, getFileBufferSize)) } // The size of buffer used for bufio.Reader on LazyChunkReader passed to diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go index 4a3ca0429..817519a30 100644 --- a/swarm/api/http/server_test.go +++ b/swarm/api/http/server_test.go @@ -30,12 +30,16 @@ import ( "math/big" "mime/multipart" "net/http" + "net/url" "os" + "path" "strconv" "strings" "testing" "time" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -121,8 +125,8 @@ func TestBzzResourceMultihash(t *testing.T) { // add the data our multihash aliased manifest will point to databytes := "bar" - url := fmt.Sprintf("%s/bzz:/", srv.URL) - resp, err := http.Post(url, "text/plain", bytes.NewReader([]byte(databytes))) + testBzzUrl := fmt.Sprintf("%s/bzz:/", srv.URL) + resp, err := http.Post(testBzzUrl, "text/plain", bytes.NewReader([]byte(databytes))) if err != nil { t.Fatal(err) } @@ -140,33 +144,27 @@ func TestBzzResourceMultihash(t *testing.T) { log.Info("added data", "manifest", string(b), "data", common.ToHex(mh)) - // our mutable resource "name" - keybytes := "foo.eth" + topic, _ := mru.NewTopic("foo.eth", nil) + updateRequest := mru.NewFirstRequest(topic) - updateRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{ - Name: keybytes, - Frequency: 13, - StartTime: srv.GetCurrentTime(), - Owner: signer.Address(), - }) - if err != nil { - t.Fatal(err) - } - updateRequest.SetData(mh, true) + updateRequest.SetData(mh) if err := updateRequest.Sign(signer); err != nil { t.Fatal(err) } log.Info("added data", "manifest", string(b), "data", common.ToHex(mh)) - body, err := updateRequest.MarshalJSON() + testUrl, err := url.Parse(fmt.Sprintf("%s/bzz-resource:/", srv.URL)) if err != nil { t.Fatal(err) } + query := testUrl.Query() + body := updateRequest.AppendValues(query) // this adds all query parameters and returns the data to be posted + query.Set("manifest", "1") // indicate we want a manifest back + testUrl.RawQuery = query.Encode() // create the multihash update - url = fmt.Sprintf("%s/bzz-resource:/", srv.URL) - resp, err = http.Post(url, "application/json", bytes.NewReader(body)) + resp, err = http.Post(testUrl.String(), "application/octet-stream", bytes.NewReader(body)) if err != nil { t.Fatal(err) } @@ -184,14 +182,14 @@ func TestBzzResourceMultihash(t *testing.T) { t.Fatalf("data %s could not be unmarshaled: %v", b, err) } - correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e" + correctManifestAddrHex := "6ef40ba1492cf2a029dc9a8b5896c822cf689d3cd010842f4f1744e6db8824bd" if rsrcResp.Hex() != correctManifestAddrHex { t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex()) } // get bzz manifest transparent resource resolve - url = fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp) - resp, err = http.Get(url) + testBzzUrl = fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp) + resp, err = http.Get(testBzzUrl) if err != nil { t.Fatal(err) } @@ -215,39 +213,38 @@ func TestBzzResource(t *testing.T) { defer srv.Close() - // our mutable resource "name" - keybytes := "foo.eth" - // data of update 1 - databytes := make([]byte, 666) - _, err := rand.Read(databytes) 
+ update1Data := make([]byte, 666) + update1Timestamp := srv.CurrentTime + _, err := rand.Read(update1Data) if err != nil { t.Fatal(err) } + //data for update 2 + update2Data := []byte("foo") - updateRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{ - Name: keybytes, - Frequency: 13, - StartTime: srv.GetCurrentTime(), - Owner: signer.Address(), - }) + topic, _ := mru.NewTopic("foo.eth", nil) + updateRequest := mru.NewFirstRequest(topic) if err != nil { t.Fatal(err) } - updateRequest.SetData(databytes, false) + updateRequest.SetData(update1Data) if err := updateRequest.Sign(signer); err != nil { t.Fatal(err) } - body, err := updateRequest.MarshalJSON() + // creates resource and sets update 1 + testUrl, err := url.Parse(fmt.Sprintf("%s/bzz-resource:/", srv.URL)) if err != nil { t.Fatal(err) } + urlQuery := testUrl.Query() + body := updateRequest.AppendValues(urlQuery) // this adds all query parameters + urlQuery.Set("manifest", "1") // indicate we want a manifest back + testUrl.RawQuery = urlQuery.Encode() - // creates resource and sets update 1 - url := fmt.Sprintf("%s/bzz-resource:/", srv.URL) - resp, err := http.Post(url, "application/json", bytes.NewReader(body)) + resp, err := http.Post(testUrl.String(), "application/octet-stream", bytes.NewReader(body)) if err != nil { t.Fatal(err) } @@ -265,14 +262,14 @@ func TestBzzResource(t *testing.T) { t.Fatalf("data %s could not be unmarshaled: %v", b, err) } - correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e" + correctManifestAddrHex := "6ef40ba1492cf2a029dc9a8b5896c822cf689d3cd010842f4f1744e6db8824bd" if rsrcResp.Hex() != correctManifestAddrHex { - t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex()) + t.Fatalf("Response resource manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex()) } // get the manifest - url = fmt.Sprintf("%s/bzz-raw:/%s", srv.URL, rsrcResp) - resp, err = http.Get(url) + testRawUrl := fmt.Sprintf("%s/bzz-raw:/%s", srv.URL, rsrcResp) + resp, err = http.Get(testRawUrl) if err != nil { t.Fatal(err) } @@ -292,20 +289,20 @@ func TestBzzResource(t *testing.T) { if len(manifest.Entries) != 1 { t.Fatalf("Manifest has %d entries", len(manifest.Entries)) } - correctRootKeyHex := "68f7ba07ac8867a4c841a4d4320e3cdc549df23702dc7285fcb6acf65df48562" - if manifest.Entries[0].Hash != correctRootKeyHex { - t.Fatalf("Expected manifest path '%s', got '%s'", correctRootKeyHex, manifest.Entries[0].Hash) + correctViewHex := "0x666f6f2e65746800000000000000000000000000000000000000000000000000c96aaa54e2d44c299564da76e1cd3184a2386b8d" + if manifest.Entries[0].ResourceView.Hex() != correctViewHex { + t.Fatalf("Expected manifest Resource View '%s', got '%s'", correctViewHex, manifest.Entries[0].ResourceView.Hex()) } // get bzz manifest transparent resource resolve - url = fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp) - resp, err = http.Get(url) + testBzzUrl := fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp) + resp, err = http.Get(testBzzUrl) if err != nil { t.Fatal(err) } defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("err %s", resp.Status) + if resp.StatusCode == http.StatusOK { + t.Fatal("Expected error status since resource is not multihash. 
Received 200 OK") } b, err = ioutil.ReadAll(resp.Body) if err != nil { @@ -313,8 +310,8 @@ func TestBzzResource(t *testing.T) { } // get non-existent name, should fail - url = fmt.Sprintf("%s/bzz-resource:/bar", srv.URL) - resp, err = http.Get(url) + testBzzResUrl := fmt.Sprintf("%s/bzz-resource:/bar", srv.URL) + resp, err = http.Get(testBzzResUrl) if err != nil { t.Fatal(err) } @@ -327,8 +324,8 @@ func TestBzzResource(t *testing.T) { // get latest update (1.1) through resource directly log.Info("get update latest = 1.1", "addr", correctManifestAddrHex) - url = fmt.Sprintf("%s/bzz-resource:/%s", srv.URL, correctManifestAddrHex) - resp, err = http.Get(url) + testBzzResUrl = fmt.Sprintf("%s/bzz-resource:/%s", srv.URL, correctManifestAddrHex) + resp, err = http.Get(testBzzResUrl) if err != nil { t.Fatal(err) } @@ -340,16 +337,18 @@ func TestBzzResource(t *testing.T) { if err != nil { t.Fatal(err) } - if !bytes.Equal(databytes, b) { - t.Fatalf("Expected body '%x', got '%x'", databytes, b) + if !bytes.Equal(update1Data, b) { + t.Fatalf("Expected body '%x', got '%x'", update1Data, b) } // update 2 + // Move the clock ahead 1 second + srv.CurrentTime++ log.Info("update 2") // 1.- get metadata about this resource - url = fmt.Sprintf("%s/bzz-resource:/%s/", srv.URL, correctManifestAddrHex) - resp, err = http.Get(url + "meta") + testBzzResUrl = fmt.Sprintf("%s/bzz-resource:/%s/", srv.URL, correctManifestAddrHex) + resp, err = http.Get(testBzzResUrl + "?meta=1") if err != nil { t.Fatal(err) } @@ -365,17 +364,19 @@ func TestBzzResource(t *testing.T) { if err = updateRequest.UnmarshalJSON(b); err != nil { t.Fatalf("Error decoding resource metadata: %s", err) } - data := []byte("foo") - updateRequest.SetData(data, false) + updateRequest.SetData(update2Data) if err = updateRequest.Sign(signer); err != nil { t.Fatal(err) } - body, err = updateRequest.MarshalJSON() + testUrl, err = url.Parse(fmt.Sprintf("%s/bzz-resource:/", srv.URL)) if err != nil { t.Fatal(err) } + urlQuery = testUrl.Query() + body = updateRequest.AppendValues(urlQuery) // this adds all query parameters + testUrl.RawQuery = urlQuery.Encode() - resp, err = http.Post(url, "application/json", bytes.NewReader(body)) + resp, err = http.Post(testUrl.String(), "application/octet-stream", bytes.NewReader(body)) if err != nil { t.Fatal(err) } @@ -386,8 +387,8 @@ func TestBzzResource(t *testing.T) { // get latest update (1.2) through resource directly log.Info("get update 1.2") - url = fmt.Sprintf("%s/bzz-resource:/%s", srv.URL, correctManifestAddrHex) - resp, err = http.Get(url) + testBzzResUrl = fmt.Sprintf("%s/bzz-resource:/%s", srv.URL, correctManifestAddrHex) + resp, err = http.Get(testBzzResUrl) if err != nil { t.Fatal(err) } @@ -399,33 +400,23 @@ func TestBzzResource(t *testing.T) { if err != nil { t.Fatal(err) } - if !bytes.Equal(data, b) { - t.Fatalf("Expected body '%x', got '%x'", data, b) + if !bytes.Equal(update2Data, b) { + t.Fatalf("Expected body '%x', got '%x'", update2Data, b) } - // get latest update (1.2) with specified period - log.Info("get update latest = 1.2") - url = fmt.Sprintf("%s/bzz-resource:/%s/1", srv.URL, correctManifestAddrHex) - resp, err = http.Get(url) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("err %s", resp.Status) - } - b, err = ioutil.ReadAll(resp.Body) + // test manifest-less queries + log.Info("get first update in update1Timestamp via direct query") + query := mru.NewQuery(&updateRequest.View, update1Timestamp, lookup.NoClue) + + urlq, err 
:= url.Parse(fmt.Sprintf("%s/bzz-resource:/", srv.URL)) if err != nil { t.Fatal(err) } - if !bytes.Equal(data, b) { - t.Fatalf("Expected body '%x', got '%x'", data, b) - } - // get first update (1.1) with specified period and version - log.Info("get first update 1.1") - url = fmt.Sprintf("%s/bzz-resource:/%s/1/1", srv.URL, correctManifestAddrHex) - resp, err = http.Get(url) + values := urlq.Query() + query.AppendValues(values) // this adds view query parameters + urlq.RawQuery = values.Encode() + resp, err = http.Get(urlq.String()) if err != nil { t.Fatal(err) } @@ -437,9 +428,10 @@ func TestBzzResource(t *testing.T) { if err != nil { t.Fatal(err) } - if !bytes.Equal(databytes, b) { - t.Fatalf("Expected body '%x', got '%x'", databytes, b) + if !bytes.Equal(update1Data, b) { + t.Fatalf("Expected body '%x', got '%x'", update1Data, b) } + } func TestBzzGetPath(t *testing.T) { @@ -773,6 +765,16 @@ func testBzzTar(encrypted bool, t *testing.T) { } defer resp2.Body.Close() + if h := resp2.Header.Get("Content-Type"); h != "application/x-tar" { + t.Fatalf("Content-Type header expected: application/x-tar, got: %s", h) + } + + expectedFileName := string(swarmHash) + ".tar" + expectedContentDisposition := fmt.Sprintf("inline; filename=\"%s\"", expectedFileName) + if h := resp2.Header.Get("Content-Disposition"); h != expectedContentDisposition { + t.Fatalf("Content-Disposition header expected: %s, got: %s", expectedContentDisposition, h) + } + file, err := ioutil.TempFile("", "swarm-downloaded-tarball") if err != nil { t.Fatal(err) @@ -1108,7 +1110,7 @@ func TestModify(t *testing.T) { res, body := httpDo(testCase.method, testCase.uri, reqBody, testCase.headers, testCase.verbose, t) if res.StatusCode != testCase.expectedStatusCode { - t.Fatalf("expected status code %d but got %d", testCase.expectedStatusCode, res.StatusCode) + t.Fatalf("expected status code %d but got %d, %s", testCase.expectedStatusCode, res.StatusCode, body) } if testCase.assertResponseBody != "" && !strings.Contains(body, testCase.assertResponseBody) { t.Log(body) @@ -1219,19 +1221,25 @@ func TestBzzGetFileWithResolver(t *testing.T) { hash := common.HexToHash(string(swarmHash)) resolver.hash = &hash for _, v := range []struct { - addr string - path string - expectedStatusCode int + addr string + path string + expectedStatusCode int + expectedContentType string + expectedFileName string }{ { - addr: string(swarmHash), - path: fileNames[0], - expectedStatusCode: http.StatusOK, + addr: string(swarmHash), + path: fileNames[0], + expectedStatusCode: http.StatusOK, + expectedContentType: "text/plain", + expectedFileName: path.Base(fileNames[0]), }, { - addr: "somebogusensname", - path: fileNames[0], - expectedStatusCode: http.StatusOK, + addr: "somebogusensname", + path: fileNames[0], + expectedStatusCode: http.StatusOK, + expectedContentType: "text/plain", + expectedFileName: path.Base(fileNames[0]), }, } { req, err := http.NewRequest("GET", fmt.Sprintf(srv.URL+"/bzz:/%s/%s", v.addr, v.path), nil) @@ -1246,6 +1254,16 @@ func TestBzzGetFileWithResolver(t *testing.T) { if serverResponse.StatusCode != v.expectedStatusCode { t.Fatalf("expected %d, got %d", v.expectedStatusCode, serverResponse.StatusCode) } + + if h := serverResponse.Header.Get("Content-Type"); h != v.expectedContentType { + t.Fatalf("Content-Type header expected: %s, got %s", v.expectedContentType, h) + } + + expectedContentDisposition := fmt.Sprintf("inline; filename=\"%s\"", v.expectedFileName) + if h := serverResponse.Header.Get("Content-Disposition"); h != 
expectedContentDisposition { + t.Fatalf("Content-Disposition header expected: %s, got: %s", expectedContentDisposition, h) + } + } } diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go index d44ad2277..06be7323e 100644 --- a/swarm/api/manifest.go +++ b/swarm/api/manifest.go @@ -27,6 +27,8 @@ import ( "strings" "time" + "github.com/ethereum/go-ethereum/swarm/storage/mru" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/storage" @@ -46,14 +48,15 @@ type Manifest struct { // ManifestEntry represents an entry in a swarm manifest type ManifestEntry struct { - Hash string `json:"hash,omitempty"` - Path string `json:"path,omitempty"` - ContentType string `json:"contentType,omitempty"` - Mode int64 `json:"mode,omitempty"` - Size int64 `json:"size,omitempty"` - ModTime time.Time `json:"mod_time,omitempty"` - Status int `json:"status,omitempty"` - Access *AccessEntry `json:"access,omitempty"` + Hash string `json:"hash,omitempty"` + Path string `json:"path,omitempty"` + ContentType string `json:"contentType,omitempty"` + Mode int64 `json:"mode,omitempty"` + Size int64 `json:"size,omitempty"` + ModTime time.Time `json:"mod_time,omitempty"` + Status int `json:"status,omitempty"` + Access *AccessEntry `json:"access,omitempty"` + ResourceView *mru.View `json:"resourceView,omitempty"` } // ManifestList represents the result of listing files in a manifest @@ -79,11 +82,11 @@ func (a *API) NewManifest(ctx context.Context, toEncrypt bool) (storage.Address, // Manifest hack for supporting Mutable Resource Updates from the bzz: scheme // see swarm/api/api.go:API.Get() for more information -func (a *API) NewResourceManifest(ctx context.Context, resourceAddr string) (storage.Address, error) { +func (a *API) NewResourceManifest(ctx context.Context, view *mru.View) (storage.Address, error) { var manifest Manifest entry := ManifestEntry{ - Hash: resourceAddr, - ContentType: ResourceContentType, + ResourceView: view, + ContentType: ResourceContentType, } manifest.Entries = append(manifest.Entries, entry) data, err := json.Marshal(&manifest) diff --git a/swarm/network/stream/common_test.go b/swarm/network/stream/common_test.go index deb7e9815..72fdb2bd9 100644 --- a/swarm/network/stream/common_test.go +++ b/swarm/network/stream/common_test.go @@ -84,7 +84,7 @@ func createGlobalStore() (string, *mockdb.GlobalStore, error) { return globalStoreDir, globalStore, nil } -func newStreamerTester(t *testing.T) (*p2ptest.ProtocolTester, *Registry, *storage.LocalStore, func(), error) { +func newStreamerTester(t *testing.T, registryOptions *RegistryOptions) (*p2ptest.ProtocolTester, *Registry, *storage.LocalStore, func(), error) { // setup addr := network.RandomAddr() // tested peers peer address to := network.NewKademlia(addr.OAddr, network.NewKadParams()) @@ -114,7 +114,7 @@ func newStreamerTester(t *testing.T) (*p2ptest.ProtocolTester, *Registry, *stora delivery := NewDelivery(to, netStore) netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New - streamer := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), nil) + streamer := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), registryOptions) teardown := func() { streamer.Close() removeDataDir() diff --git a/swarm/network/stream/delivery_test.go b/swarm/network/stream/delivery_test.go index 9fb90eeba..b021b8771 100644 --- a/swarm/network/stream/delivery_test.go +++ b/swarm/network/stream/delivery_test.go @@ -39,7 +39,7 
@@ import ( ) func TestStreamerRetrieveRequest(t *testing.T) { - tester, streamer, _, teardown, err := newStreamerTester(t) + tester, streamer, _, teardown, err := newStreamerTester(t, nil) defer teardown() if err != nil { t.Fatal(err) @@ -75,7 +75,7 @@ func TestStreamerRetrieveRequest(t *testing.T) { } func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) { - tester, streamer, _, teardown, err := newStreamerTester(t) + tester, streamer, _, teardown, err := newStreamerTester(t, nil) defer teardown() if err != nil { t.Fatal(err) @@ -127,7 +127,7 @@ func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) { // upstream request server receives a retrieve Request and responds with // offered hashes or delivery if skipHash is set to true func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) { - tester, streamer, localStore, teardown, err := newStreamerTester(t) + tester, streamer, localStore, teardown, err := newStreamerTester(t, nil) defer teardown() if err != nil { t.Fatal(err) @@ -221,7 +221,7 @@ func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) { } func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) { - tester, streamer, localStore, teardown, err := newStreamerTester(t) + tester, streamer, localStore, teardown, err := newStreamerTester(t, nil) defer teardown() if err != nil { t.Fatal(err) diff --git a/swarm/network/stream/messages.go b/swarm/network/stream/messages.go index 117f88044..74c785d58 100644 --- a/swarm/network/stream/messages.go +++ b/swarm/network/stream/messages.go @@ -84,11 +84,13 @@ func (p *Peer) handleSubscribeMsg(ctx context.Context, req *SubscribeMsg) (err e defer func() { if err != nil { - if e := p.Send(context.TODO(), SubscribeErrorMsg{ + // The error will be sent as a subscribe error message + // and will not be returned as it will prevent any new message + // exchange between peers over p2p. Instead, error will be returned + // only if there is one from sending subscribe error message. + err = p.Send(context.TODO(), SubscribeErrorMsg{ Error: err.Error(), - }); e != nil { - log.Error("send stream subscribe error message", "err", err) - } + }) } }() diff --git a/swarm/network/stream/peer.go b/swarm/network/stream/peer.go index 5fdaa7b87..ef6bbdf70 100644 --- a/swarm/network/stream/peer.go +++ b/swarm/network/stream/peer.go @@ -18,6 +18,7 @@ package stream import ( "context" + "errors" "fmt" "sync" "time" @@ -46,6 +47,10 @@ func (e *notFoundError) Error() string { return fmt.Sprintf("%s not found for stream %q", e.t, e.s) } +// ErrMaxPeerServers will be returned if peer server limit is reached. +// It will be sent in the SubscribeErrorMsg. 
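The RegistryOptions.MaxPeerServers field introduced in this change caps how many server-side streams a single peer may register; once the cap is reached, setServer returns ErrMaxPeerServers and the error is relayed to the remote peer in a SubscribeErrorMsg (see the peer.go and stream.go hunks below). A minimal, hypothetical wiring sketch follows; the helper name, the 15-second delay and the cap of 1000 are illustrative, and the parameter types are assumed from the NewRegistry call used by the tests:

```golang
package example

import (
	"time"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/swarm/network/stream"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

// newCappedRegistry is a hypothetical wrapper showing how the new
// MaxPeerServers option could be wired in. The delivery and chunk store
// arguments are assumed to be constructed elsewhere, e.g. the way
// newStreamerTester does in the tests below.
func newCappedRegistry(localID enode.ID, delivery *stream.Delivery, store storage.SyncChunkStore) *stream.Registry {
	return stream.NewRegistry(localID, delivery, store, state.NewInmemoryStore(), &stream.RegistryOptions{
		DoSync:          true,
		SyncUpdateDelay: 15 * time.Second,
		MaxPeerServers:  1000, // 0 keeps the previous, unlimited behaviour
	})
}
```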
+var ErrMaxPeerServers = errors.New("max peer servers") + // Peer is the Peer extension for the streaming protocol type Peer struct { *protocols.Peer @@ -204,6 +209,11 @@ func (p *Peer) setServer(s Stream, o Server, priority uint8) (*server, error) { if p.servers[s] != nil { return nil, fmt.Errorf("server %s already registered", s) } + + if p.streamer.maxPeerServers > 0 && len(p.servers) >= p.streamer.maxPeerServers { + return nil, ErrMaxPeerServers + } + os := &server{ Server: o, stream: s, @@ -346,6 +356,7 @@ func (p *Peer) removeClient(s Stream) error { return newNotFoundError("client", s) } client.close() + delete(p.clients, s) return nil } diff --git a/swarm/network/stream/stream.go b/swarm/network/stream/stream.go index 65b8dff5a..1eda06c6a 100644 --- a/swarm/network/stream/stream.go +++ b/swarm/network/stream/stream.go @@ -60,6 +60,7 @@ type Registry struct { delivery *Delivery intervalsStore state.Store doRetrieve bool + maxPeerServers int } // RegistryOptions holds optional values for NewRegistry constructor. @@ -68,6 +69,7 @@ type RegistryOptions struct { DoSync bool DoRetrieve bool SyncUpdateDelay time.Duration + MaxPeerServers int // The limit of servers for each peer in registry } // NewRegistry is Streamer constructor @@ -87,6 +89,7 @@ func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.Sy delivery: delivery, intervalsStore: intervalsStore, doRetrieve: options.DoRetrieve, + maxPeerServers: options.MaxPeerServers, } streamer.api = NewAPI(streamer) delivery.getPeer = streamer.getPeer diff --git a/swarm/network/stream/streamer_test.go b/swarm/network/stream/streamer_test.go index 0390a7b9b..0bdebefa7 100644 --- a/swarm/network/stream/streamer_test.go +++ b/swarm/network/stream/streamer_test.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "errors" + "strconv" "testing" "time" @@ -28,7 +29,7 @@ import ( ) func TestStreamerSubscribe(t *testing.T) { - tester, streamer, _, teardown, err := newStreamerTester(t) + tester, streamer, _, teardown, err := newStreamerTester(t, nil) defer teardown() if err != nil { t.Fatal(err) @@ -42,7 +43,7 @@ func TestStreamerSubscribe(t *testing.T) { } func TestStreamerRequestSubscription(t *testing.T) { - tester, streamer, _, teardown, err := newStreamerTester(t) + tester, streamer, _, teardown, err := newStreamerTester(t, nil) defer teardown() if err != nil { t.Fatal(err) @@ -127,7 +128,7 @@ func (self *testServer) Close() { } func TestStreamerDownstreamSubscribeUnsubscribeMsgExchange(t *testing.T) { - tester, streamer, _, teardown, err := newStreamerTester(t) + tester, streamer, _, teardown, err := newStreamerTester(t, nil) defer teardown() if err != nil { t.Fatal(err) @@ -220,7 +221,7 @@ func TestStreamerDownstreamSubscribeUnsubscribeMsgExchange(t *testing.T) { } func TestStreamerUpstreamSubscribeUnsubscribeMsgExchange(t *testing.T) { - tester, streamer, _, teardown, err := newStreamerTester(t) + tester, streamer, _, teardown, err := newStreamerTester(t, nil) defer teardown() if err != nil { t.Fatal(err) @@ -287,7 +288,7 @@ func TestStreamerUpstreamSubscribeUnsubscribeMsgExchange(t *testing.T) { } func TestStreamerUpstreamSubscribeUnsubscribeMsgExchangeLive(t *testing.T) { - tester, streamer, _, teardown, err := newStreamerTester(t) + tester, streamer, _, teardown, err := newStreamerTester(t, nil) defer teardown() if err != nil { t.Fatal(err) @@ -353,7 +354,7 @@ func TestStreamerUpstreamSubscribeUnsubscribeMsgExchangeLive(t *testing.T) { } func TestStreamerUpstreamSubscribeErrorMsgExchange(t *testing.T) { - tester, streamer, 
_, teardown, err := newStreamerTester(t) + tester, streamer, _, teardown, err := newStreamerTester(t, nil) defer teardown() if err != nil { t.Fatal(err) @@ -397,7 +398,7 @@ func TestStreamerUpstreamSubscribeErrorMsgExchange(t *testing.T) { } func TestStreamerUpstreamSubscribeLiveAndHistory(t *testing.T) { - tester, streamer, _, teardown, err := newStreamerTester(t) + tester, streamer, _, teardown, err := newStreamerTester(t, nil) defer teardown() if err != nil { t.Fatal(err) @@ -462,7 +463,7 @@ func TestStreamerUpstreamSubscribeLiveAndHistory(t *testing.T) { } func TestStreamerDownstreamCorruptHashesMsgExchange(t *testing.T) { - tester, streamer, _, teardown, err := newStreamerTester(t) + tester, streamer, _, teardown, err := newStreamerTester(t, nil) defer teardown() if err != nil { t.Fatal(err) @@ -527,7 +528,7 @@ func TestStreamerDownstreamCorruptHashesMsgExchange(t *testing.T) { } func TestStreamerDownstreamOfferedHashesMsgExchange(t *testing.T) { - tester, streamer, _, teardown, err := newStreamerTester(t) + tester, streamer, _, teardown, err := newStreamerTester(t, nil) defer teardown() if err != nil { t.Fatal(err) @@ -626,7 +627,7 @@ func TestStreamerDownstreamOfferedHashesMsgExchange(t *testing.T) { } func TestStreamerRequestSubscriptionQuitMsgExchange(t *testing.T) { - tester, streamer, _, teardown, err := newStreamerTester(t) + tester, streamer, _, teardown, err := newStreamerTester(t, nil) defer teardown() if err != nil { t.Fatal(err) @@ -752,3 +753,165 @@ func TestStreamerRequestSubscriptionQuitMsgExchange(t *testing.T) { t.Fatal(err) } } + +// TestMaxPeerServersWithUnsubscribe creates a registry with a limited +// number of stream servers, and performs a test with subscriptions and +// unsubscriptions, checking if unsubscriptions will remove streams, +// leaving place for new streams. +func TestMaxPeerServersWithUnsubscribe(t *testing.T) { + var maxPeerServers = 6 + tester, streamer, _, teardown, err := newStreamerTester(t, &RegistryOptions{ + MaxPeerServers: maxPeerServers, + }) + defer teardown() + if err != nil { + t.Fatal(err) + } + + streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) { + return newTestServer(t), nil + }) + + node := tester.Nodes[0] + + for i := 0; i < maxPeerServers+10; i++ { + stream := NewStream("foo", strconv.Itoa(i), true) + + err = tester.TestExchanges(p2ptest.Exchange{ + Label: "Subscribe message", + Triggers: []p2ptest.Trigger{ + { + Code: 4, + Msg: &SubscribeMsg{ + Stream: stream, + Priority: Top, + }, + Peer: node.ID(), + }, + }, + Expects: []p2ptest.Expect{ + { + Code: 1, + Msg: &OfferedHashesMsg{ + Stream: stream, + HandoverProof: &HandoverProof{ + Handover: &Handover{}, + }, + Hashes: make([]byte, HashSize), + From: 1, + To: 1, + }, + Peer: node.ID(), + }, + }, + }) + + if err != nil { + t.Fatal(err) + } + + err = tester.TestExchanges(p2ptest.Exchange{ + Label: "unsubscribe message", + Triggers: []p2ptest.Trigger{ + { + Code: 0, + Msg: &UnsubscribeMsg{ + Stream: stream, + }, + Peer: node.ID(), + }, + }, + }) + + if err != nil { + t.Fatal(err) + } + } +} + +// TestMaxPeerServersWithoutUnsubscribe creates a registry with a limited +// number of stream servers, and performs subscriptions to detect subscriptions +// error message exchange. 
+func TestMaxPeerServersWithoutUnsubscribe(t *testing.T) { + var maxPeerServers = 6 + tester, streamer, _, teardown, err := newStreamerTester(t, &RegistryOptions{ + MaxPeerServers: maxPeerServers, + }) + defer teardown() + if err != nil { + t.Fatal(err) + } + + streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) { + return newTestServer(t), nil + }) + + node := tester.Nodes[0] + + for i := 0; i < maxPeerServers+10; i++ { + stream := NewStream("foo", strconv.Itoa(i), true) + + if i >= maxPeerServers { + err = tester.TestExchanges(p2ptest.Exchange{ + Label: "Subscribe message", + Triggers: []p2ptest.Trigger{ + { + Code: 4, + Msg: &SubscribeMsg{ + Stream: stream, + Priority: Top, + }, + Peer: node.ID(), + }, + }, + Expects: []p2ptest.Expect{ + { + Code: 7, + Msg: &SubscribeErrorMsg{ + Error: ErrMaxPeerServers.Error(), + }, + Peer: node.ID(), + }, + }, + }) + + if err != nil { + t.Fatal(err) + } + continue + } + + err = tester.TestExchanges(p2ptest.Exchange{ + Label: "Subscribe message", + Triggers: []p2ptest.Trigger{ + { + Code: 4, + Msg: &SubscribeMsg{ + Stream: stream, + Priority: Top, + }, + Peer: node.ID(), + }, + }, + Expects: []p2ptest.Expect{ + { + Code: 1, + Msg: &OfferedHashesMsg{ + Stream: stream, + HandoverProof: &HandoverProof{ + Handover: &Handover{}, + }, + Hashes: make([]byte, HashSize), + From: 1, + To: 1, + }, + Peer: node.ID(), + }, + }, + }) + + if err != nil { + t.Fatal(err) + } + } +} diff --git a/swarm/storage/hasherstore.go b/swarm/storage/hasherstore.go index 879622b9a..ff18e64c7 100644 --- a/swarm/storage/hasherstore.go +++ b/swarm/storage/hasherstore.go @@ -32,10 +32,13 @@ type hasherStore struct { hashFunc SwarmHasher hashSize int // content hash size refSize int64 // reference size (content hash + possibly encryption key) - nrChunks uint64 // number of chunks to store errC chan error // global error channel doneC chan struct{} // closed by Close() call to indicate that count is the final number of chunks quitC chan struct{} // closed to quit unterminated routines + // nrChunks is used with atomic functions + // it is required to be at the end of the struct to ensure 64bit alignment for arm architecture + // see: https://golang.org/pkg/sync/atomic/#pkg-note-BUG + nrChunks uint64 // number of chunks to store } // NewHasherStore creates a hasherStore object, which implements Putter and Getter interfaces. diff --git a/swarm/storage/mru/binaryserializer.go b/swarm/storage/mru/binaryserializer.go new file mode 100644 index 000000000..3123a82ee --- /dev/null +++ b/swarm/storage/mru/binaryserializer.go @@ -0,0 +1,44 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
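Aside on the hasherStore hunk above: nrChunks is repositioned because of the sync/atomic alignment requirement referenced in the new comment. Below is a stdlib-only sketch of the same constraint with a made-up counter type; it is not the hasherStore layout itself:

```golang
package example

import "sync/atomic"

// On 32-bit platforms (ARM, x86-32) a uint64 accessed with sync/atomic must
// sit at an 8-byte-aligned offset, and the Go documentation only guarantees
// that for the first word of an allocated struct (golang.org/pkg/sync/atomic
// pkg-note-BUG). Field placement therefore matters for atomically updated
// 64-bit counters.
type chunkCounter struct {
	n    uint64 // updated atomically; kept first so its offset stays 8-byte aligned
	name string
}

func (c *chunkCounter) inc() uint64 {
	return atomic.AddUint64(&c.n, 1)
}
```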
+ +package mru + +import "github.com/ethereum/go-ethereum/common/hexutil" + +type binarySerializer interface { + binaryPut(serializedData []byte) error + binaryLength() int + binaryGet(serializedData []byte) error +} + +// Values interface represents a string key-value store +// useful for building query strings +type Values interface { + Get(key string) string + Set(key, value string) +} + +type valueSerializer interface { + FromValues(values Values) error + AppendValues(values Values) +} + +// Hex serializes the structure and converts it to a hex string +func Hex(bin binarySerializer) string { + b := make([]byte, bin.binaryLength()) + bin.binaryPut(b) + return hexutil.Encode(b) +} diff --git a/swarm/storage/mru/binaryserializer_test.go b/swarm/storage/mru/binaryserializer_test.go new file mode 100644 index 000000000..f524157d6 --- /dev/null +++ b/swarm/storage/mru/binaryserializer_test.go @@ -0,0 +1,98 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package mru + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +// KV mocks a key value store +type KV map[string]string + +func (kv KV) Get(key string) string { + return kv[key] +} +func (kv KV) Set(key, value string) { + kv[key] = value +} + +func compareByteSliceToExpectedHex(t *testing.T, variableName string, actualValue []byte, expectedHex string) { + if hexutil.Encode(actualValue) != expectedHex { + t.Fatalf("%s: Expected %s to be %s, got %s", t.Name(), variableName, expectedHex, hexutil.Encode(actualValue)) + } +} + +func testBinarySerializerRecovery(t *testing.T, bin binarySerializer, expectedHex string) { + name := reflect.TypeOf(bin).Elem().Name() + serialized := make([]byte, bin.binaryLength()) + if err := bin.binaryPut(serialized); err != nil { + t.Fatalf("%s.binaryPut error when trying to serialize structure: %s", name, err) + } + + compareByteSliceToExpectedHex(t, name, serialized, expectedHex) + + recovered := reflect.New(reflect.TypeOf(bin).Elem()).Interface().(binarySerializer) + if err := recovered.binaryGet(serialized); err != nil { + t.Fatalf("%s.binaryGet error when trying to deserialize structure: %s", name, err) + } + + if !reflect.DeepEqual(bin, recovered) { + t.Fatalf("Expected that the recovered %s equals the marshalled %s", name, name) + } + + serializedWrongLength := make([]byte, 1) + copy(serializedWrongLength[:], serialized) + if err := recovered.binaryGet(serializedWrongLength); err == nil { + t.Fatalf("Expected %s.binaryGet to fail since data is too small", name) + } +} + +func testBinarySerializerLengthCheck(t *testing.T, bin binarySerializer) { + name := reflect.TypeOf(bin).Elem().Name() + // make a slice that is too small to contain the metadata + serialized := make([]byte, bin.binaryLength()-1) + 
+ if err := bin.binaryPut(serialized); err == nil { + t.Fatalf("Expected %s.binaryPut to fail, since target slice is too small", name) + } +} + +func testValueSerializer(t *testing.T, v valueSerializer, expected KV) { + name := reflect.TypeOf(v).Elem().Name() + kv := make(KV) + + v.AppendValues(kv) + if !reflect.DeepEqual(expected, kv) { + expj, _ := json.Marshal(expected) + gotj, _ := json.Marshal(kv) + t.Fatalf("Expected %s.AppendValues to return %s, got %s", name, string(expj), string(gotj)) + } + + recovered := reflect.New(reflect.TypeOf(v).Elem()).Interface().(valueSerializer) + err := recovered.FromValues(kv) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(recovered, v) { + t.Fatalf("Expected recovered %s to be the same", name) + } +} diff --git a/swarm/storage/mru/cacheentry.go b/swarm/storage/mru/cacheentry.go new file mode 100644 index 000000000..280331f77 --- /dev/null +++ b/swarm/storage/mru/cacheentry.go @@ -0,0 +1,48 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package mru + +import ( + "bytes" + "context" + "time" + + "github.com/ethereum/go-ethereum/swarm/storage" +) + +const ( + hasherCount = 8 + resourceHashAlgorithm = storage.SHA3Hash + defaultRetrieveTimeout = 100 * time.Millisecond +) + +// cacheEntry caches resource data and the metadata of its root chunk. +type cacheEntry struct { + ResourceUpdate + *bytes.Reader + lastKey storage.Address +} + +// implements storage.LazySectionReader +func (r *cacheEntry) Size(ctx context.Context, _ chan bool) (int64, error) { + return int64(len(r.ResourceUpdate.data)), nil +} + +//returns the resource's topic +func (r *cacheEntry) Topic() Topic { + return r.View.Topic +} diff --git a/swarm/storage/mru/doc.go b/swarm/storage/mru/doc.go index e1d7c2c34..19330e0c1 100644 --- a/swarm/storage/mru/doc.go +++ b/swarm/storage/mru/doc.go @@ -1,61 +1,44 @@ -// Package mru defines Mutable resource updates. -// A Mutable Resource is an entity which allows updates to a resource -// without resorting to ENS on each update. -// The update scheme is built on swarm chunks with chunk keys following -// a predictable, versionable pattern. -// -// Updates are defined to be periodic in nature, where the update frequency -// is expressed in seconds. -// -// The root entry of a mutable resource is tied to a unique identifier that -// is deterministically generated out of the metadata content that describes -// the resource. This metadata includes a user-defined resource name, a resource -// start time that indicates when the resource becomes valid, -// the frequency in seconds with which the resource is expected to be updated, both of -// which are stored as little-endian uint64 values in the database (for a -// total of 16 bytes). 
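For readers following the new binaryserializer.go, here is a hedged sketch of a trivial implementation of the binaryPut/binaryGet/binaryLength contract that the test helpers above exercise. The toyCounter type is invented for illustration and is not part of the patch; since binarySerializer is unexported, the sketch assumes it sits alongside the mru package:

```golang
package mru

import (
	"encoding/binary"
	"errors"
)

// toyCounter is a made-up 4-byte value that satisfies binarySerializer.
type toyCounter struct {
	n uint32
}

func (t *toyCounter) binaryLength() int { return 4 }

func (t *toyCounter) binaryPut(serializedData []byte) error {
	if len(serializedData) != t.binaryLength() {
		return errors.New("incorrect slice size to serialize toyCounter")
	}
	binary.LittleEndian.PutUint32(serializedData, t.n)
	return nil
}

func (t *toyCounter) binaryGet(serializedData []byte) error {
	if len(serializedData) != t.binaryLength() {
		return errors.New("incorrect slice size to read toyCounter")
	}
	t.n = binary.LittleEndian.Uint32(serializedData)
	return nil
}
```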
It also contains the owner's address (ownerAddr) -// This MRU info is stored in a separate content-addressed chunk -// (call it the metadata chunk), with the following layout: -// -// (00|length|startTime|frequency|name|ownerAddr) -// -// (The two first zero-value bytes are used for disambiguation by the chunk validator, -// and update chunk will always have a value > 0 there.) -// -// Each metadata chunk is identified by its rootAddr, calculated as follows: -// metaHash=H(len(metadata), startTime, frequency,name) -// rootAddr = H(metaHash, ownerAddr). -// where H is the SHA3 hash function -// This scheme effectively locks the root chunk so that only the owner of the private key -// that ownerAddr was derived from can sign updates. -// -// The root entry tells the requester from when the mutable resource was -// first added (Unix time in seconds) and in which moments to look for the -// actual updates. Thus, a resource update for identifier "føø.bar" -// starting at unix time 1528800000 with frequency 300 (every 5 mins) will have updates on 1528800300, -// 1528800600, 1528800900 and so on. -// -// Actual data updates are also made in the form of swarm chunks. The keys -// of the updates are the hash of a concatenation of properties as follows: -// -// updateAddr = H(period, version, rootAddr) -// where H is the SHA3 hash function -// The period is (currentTime - startTime) / frequency -// -// Using our previous example, this means that a period 3 will happen when the -// clock hits 1528800900 -// -// If more than one update is made in the same period, incremental -// version numbers are used successively. -// -// A user looking up a resource would only need to know the rootAddr in order to get the versions -// -// the resource update data is: -// resourcedata = headerlength|period|version|rootAddr|flags|metaHash -// where flags is a 1-byte flags field. Flag 0 is set to 1 to indicate multihash -// -// the full update data that goes in the chunk payload is: -// resourcedata|sign(resourcedata) -// -// headerlength is a 16 bit value containing the byte length of period|version|rootAddr|flags|metaHash +/* +Package mru defines Mutable resource updates. + +A Mutable Resource is an entity which allows updates to a resource +without resorting to ENS on each update. +The update scheme is built on swarm chunks with chunk keys following +a predictable, versionable pattern. + +A Resource is tied to a unique identifier that is deterministically generated out of +the chosen topic. + +A Resource View is defined as a specific user's point of view about a particular resource. +Thus, a View is a Topic + the user's address (userAddr) + +Actual data updates are also made in the form of swarm chunks. The keys +of the updates are the hash of a concatenation of properties as follows: + +updateAddr = H(View, Epoch ID) +where H is the SHA3 hash function +View is the combination of Topic and the user address +Epoch ID is a time slot. See the lookup package for more information. 
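To make the addressing scheme described here concrete, the sketch below shows the idea that an update address is a deterministic function of (topic, user, epoch), so anyone who knows the View can compute where to look for updates. It is illustrative only: sha256 stands in for the Swarm hash and the byte layout is invented; the real derivation lives in ID.Addr():

```golang
package example

import (
	"crypto/sha256"
	"encoding/binary"
)

// toyUpdateAddr hashes a topic, a user address and an epoch (base time +
// level) into a stand-in update address.
func toyUpdateAddr(topic [32]byte, user [20]byte, epochBaseTime uint64, epochLevel uint8) [32]byte {
	buf := make([]byte, 0, len(topic)+len(user)+9)
	buf = append(buf, topic[:]...)
	buf = append(buf, user[:]...)
	t := make([]byte, 8)
	binary.LittleEndian.PutUint64(t, epochBaseTime)
	buf = append(buf, t...)
	buf = append(buf, epochLevel)
	return sha256.Sum256(buf)
}
```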
+ +A user looking up a resource would only need to know the View in order to +another user's updates + +The resource update data is: +resourcedata = View|Epoch|data + +the full update data that goes in the chunk payload is: +resourcedata|sign(resourcedata) + +Structure Summary: + +Request: Resource update with signature + ResourceUpdate: headers + data + Header: Protocol version and reserved for future use placeholders + ID: Information about how to locate a specific update + View: Author of the update and what is updating + Topic: Item that the updates are about + User: User who updates the resource + Epoch: time slot where the update is stored + +*/ package mru diff --git a/swarm/storage/mru/handler.go b/swarm/storage/mru/handler.go index 18c667f14..3e7654795 100644 --- a/swarm/storage/mru/handler.go +++ b/swarm/storage/mru/handler.go @@ -21,11 +21,12 @@ package mru import ( "bytes" "context" + "fmt" "sync" "time" - "unsafe" - "github.com/ethereum/go-ethereum/swarm/chunk" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" + "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/storage" ) @@ -33,7 +34,7 @@ import ( type Handler struct { chunkStore *storage.NetStore HashSize int - resources map[uint64]*resource + resources map[uint64]*cacheEntry resourceLock sync.RWMutex storeTimeout time.Duration queryMaxPeriods uint32 @@ -42,12 +43,10 @@ type Handler struct { // HandlerParams pass parameters to the Handler constructor NewHandler // Signer and TimestampProvider are mandatory parameters type HandlerParams struct { - QueryMaxPeriods uint32 } // hashPool contains a pool of ready hashers var hashPool sync.Pool -var minimumChunkLength int // init initializes the package and hashPool func init() { @@ -56,19 +55,12 @@ func init() { return storage.MakeHashFunc(resourceHashAlgorithm)() }, } - if minimumMetadataLength < minimumUpdateDataLength { - minimumChunkLength = minimumMetadataLength - } else { - minimumChunkLength = minimumUpdateDataLength - } } // NewHandler creates a new Mutable Resource API func NewHandler(params *HandlerParams) *Handler { rh := &Handler{ - resources: make(map[uint64]*resource), - storeTimeout: defaultStoreTimeout, - queryMaxPeriods: params.QueryMaxPeriods, + resources: make(map[uint64]*cacheEntry), } for i := 0; i < hasherCount; i++ { @@ -88,44 +80,25 @@ func (h *Handler) SetStore(store *storage.NetStore) { } // Validate is a chunk validation method -// If it looks like a resource update, the chunk address is checked against the ownerAddr of the update's signature +// If it looks like a resource update, the chunk address is checked against the userAddr of the update's signature // It implements the storage.ChunkValidator interface func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { dataLength := len(data) - if dataLength < minimumChunkLength || dataLength > chunk.DefaultSize+8 { + if dataLength < minimumSignedUpdateLength { return false } - //metadata chunks have the first two bytes set to zero - if data[0] == 0 && data[1] == 0 && dataLength >= minimumMetadataLength { - //metadata chunk - rootAddr, _ := metadataHash(data) - valid := bytes.Equal(chunkAddr, rootAddr) - if !valid { - log.Debug("Invalid root metadata chunk with address", "addr", chunkAddr.Hex()) - } - return valid - } - - // if it is not a metadata chunk, check if it is a properly formatted update chunk with + // check if it is a properly formatted update chunk with // valid signature and proof of ownership of the resource it is trying // to update 
// First, deserialize the chunk - var r SignedResourceUpdate + var r Request if err := r.fromChunk(chunkAddr, data); err != nil { log.Debug("Invalid resource chunk", "addr", chunkAddr.Hex(), "err", err.Error()) return false } - // check that the lookup information contained in the chunk matches the updateAddr (chunk search key) - // that was used to retrieve this chunk - // if this validation fails, someone forged a chunk. - if !bytes.Equal(chunkAddr, r.updateHeader.UpdateAddr()) { - log.Debug("period,version,rootAddr contained in update chunk do not match updateAddr", "addr", chunkAddr.Hex()) - return false - } - // Verify signatures and that the signer actually owns the resource // If it fails, it means either the signature is not valid, data is corrupted // or someone is trying to update someone else's resource. @@ -138,301 +111,134 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { } // GetContent retrieves the data payload of the last synced update of the Mutable Resource -func (h *Handler) GetContent(rootAddr storage.Address) (storage.Address, []byte, error) { - rsrc := h.get(rootAddr) - if rsrc == nil || !rsrc.isSynced() { - return nil, nil, NewError(ErrNotFound, " does not exist or is not synced") - } - return rsrc.lastKey, rsrc.data, nil -} - -// GetLastPeriod retrieves the period of the last synced update of the Mutable Resource -func (h *Handler) GetLastPeriod(rootAddr storage.Address) (uint32, error) { - rsrc := h.get(rootAddr) - if rsrc == nil { - return 0, NewError(ErrNotFound, " does not exist") - } else if !rsrc.isSynced() { - return 0, NewError(ErrNotSynced, " is not synced") +func (h *Handler) GetContent(view *View) (storage.Address, []byte, error) { + if view == nil { + return nil, nil, NewError(ErrInvalidValue, "view is nil") } - return rsrc.period, nil -} - -// GetVersion retrieves the period of the last synced update of the Mutable Resource -func (h *Handler) GetVersion(rootAddr storage.Address) (uint32, error) { - rsrc := h.get(rootAddr) + rsrc := h.get(view) if rsrc == nil { - return 0, NewError(ErrNotFound, " does not exist") - } else if !rsrc.isSynced() { - return 0, NewError(ErrNotSynced, " is not synced") - } - return rsrc.version, nil -} - -// New creates a new metadata chunk out of the request passed in. 
-func (h *Handler) New(ctx context.Context, request *Request) error { - - // frequency 0 is invalid - if request.metadata.Frequency == 0 { - return NewError(ErrInvalidValue, "frequency cannot be 0 when creating a resource") + return nil, nil, NewError(ErrNotFound, "resource does not exist") } - - // make sure owner is set to something - if request.metadata.Owner == zeroAddr { - return NewError(ErrInvalidValue, "ownerAddr must be set to create a new metadata chunk") - } - - // create the meta chunk and store it in swarm - chunk, metaHash, err := request.metadata.newChunk() - if err != nil { - return err - } - if request.metaHash != nil && !bytes.Equal(request.metaHash, metaHash) || - request.rootAddr != nil && !bytes.Equal(request.rootAddr, chunk.Address()) { - return NewError(ErrInvalidValue, "metaHash in UpdateRequest does not match actual metadata") - } - - request.metaHash = metaHash - request.rootAddr = chunk.Address() - - h.chunkStore.Put(ctx, chunk) - log.Debug("new resource", "name", request.metadata.Name, "startTime", request.metadata.StartTime, "frequency", request.metadata.Frequency, "owner", request.metadata.Owner) - - // create the internal index for the resource and populate it with its metadata - rsrc := &resource{ - resourceUpdate: resourceUpdate{ - updateHeader: updateHeader{ - UpdateLookup: UpdateLookup{ - rootAddr: chunk.Address(), - }, - }, - }, - ResourceMetadata: request.metadata, - updated: time.Now(), - } - h.set(chunk.Address(), rsrc) - - return nil + return rsrc.lastKey, rsrc.data, nil } -// NewUpdateRequest prepares an UpdateRequest structure with all the necessary information to +// NewRequest prepares a Request structure with all the necessary information to // just add the desired data and sign it. // The resulting structure can then be signed and passed to Handler.Update to be verified and sent -func (h *Handler) NewUpdateRequest(ctx context.Context, rootAddr storage.Address) (updateRequest *Request, err error) { - - if rootAddr == nil { - return nil, NewError(ErrInvalidValue, "rootAddr cannot be nil") +func (h *Handler) NewRequest(ctx context.Context, view *View) (request *Request, err error) { + if view == nil { + return nil, NewError(ErrInvalidValue, "view cannot be nil") } - // Make sure we have a cache of the metadata chunk - rsrc, err := h.Load(ctx, rootAddr) - if err != nil { - return nil, err - } + now := TimestampProvider.Now().Time + request = new(Request) + request.Header.Version = ProtocolVersion - now := TimestampProvider.Now() + query := NewQueryLatest(view, lookup.NoClue) - updateRequest = new(Request) - updateRequest.period, err = getNextPeriod(rsrc.StartTime.Time, now.Time, rsrc.Frequency) + rsrc, err := h.Lookup(ctx, query) if err != nil { - return nil, err - } - - if _, err = h.lookup(rsrc, LookupLatestVersionInPeriod(rsrc.rootAddr, updateRequest.period)); err != nil { if err.(*Error).code != ErrNotFound { return nil, err } // not finding updates means that there is a network error - // or that the resource really does not have updates in this period. 
+ // or that the resource really does not have updates } - updateRequest.multihash = rsrc.multihash - updateRequest.rootAddr = rsrc.rootAddr - updateRequest.metaHash = rsrc.metaHash - updateRequest.metadata = rsrc.ResourceMetadata + request.View = *view - // if we already have an update for this period then increment version - // resource object MUST be in sync for version to be correct, but we checked this earlier in the method already - if h.hasUpdate(rootAddr, updateRequest.period) { - updateRequest.version = rsrc.version + 1 + // if we already have an update, then find next epoch + if rsrc != nil { + request.Epoch = lookup.GetNextEpoch(rsrc.Epoch, now) } else { - updateRequest.version = 1 + request.Epoch = lookup.GetFirstEpoch(now) } - return updateRequest, nil + return request, nil } -// Lookup retrieves a specific or latest version of the resource update with metadata chunk at params.Root -// Lookup works differently depending on the configuration of `LookupParams` -// See the `LookupParams` documentation and helper functions: -// `LookupLatest`, `LookupLatestVersionInPeriod` and `LookupVersion` +// Lookup retrieves a specific or latest version of the resource +// Lookup works differently depending on the configuration of `ID` +// See the `ID` documentation and helper functions: +// `LookupLatest` and `LookupBefore` // When looking for the latest update, it starts at the next period after the current time. // upon failure tries the corresponding keys of each previous period until one is found // (or startTime is reached, in which case there are no updates). -func (h *Handler) Lookup(ctx context.Context, params *LookupParams) (*resource, error) { +func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) { - rsrc := h.get(params.rootAddr) - if rsrc == nil { - return nil, NewError(ErrNothingToReturn, "resource not loaded") + timeLimit := query.TimeLimit + if timeLimit == 0 { // if time limit is set to zero, the user wants to get the latest update + timeLimit = TimestampProvider.Now().Time } - return h.lookup(rsrc, params) -} -// LookupPrevious returns the resource before the one currently loaded in the resource cache -// This is useful where resource updates are used incrementally in contrast to -// merely replacing content. -// Requires a cached resource object to determine the current state of the resource. 
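Taken together, NewRequest, SetData, Sign and Update form the client-side publish flow that the handler tests further down exercise. A hedged sketch, assuming an already initialised Handler, View and Signer:

```golang
package example

import (
	"context"

	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

// publishUpdate is a hypothetical client-side helper: ask the handler for a
// pre-filled Request for the view, set the payload, sign it and put it on
// the network.
func publishUpdate(ctx context.Context, h *mru.Handler, view *mru.View, signer mru.Signer, payload []byte) (storage.Address, error) {
	req, err := h.NewRequest(ctx, view) // suggests the next epoch for this view
	if err != nil {
		return nil, err
	}
	req.SetData(payload)
	if err := req.Sign(signer); err != nil {
		return nil, err
	}
	return h.Update(ctx, req)
}
```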
-func (h *Handler) LookupPrevious(ctx context.Context, params *LookupParams) (*resource, error) { - rsrc := h.get(params.rootAddr) - if rsrc == nil { - return nil, NewError(ErrNothingToReturn, "resource not loaded") - } - if !rsrc.isSynced() { - return nil, NewError(ErrNotSynced, "LookupPrevious requires synced resource.") - } else if rsrc.period == 0 { - return nil, NewError(ErrNothingToReturn, " not found") - } - var version, period uint32 - if rsrc.version > 1 { - version = rsrc.version - 1 - period = rsrc.period - } else if rsrc.period == 1 { - return nil, NewError(ErrNothingToReturn, "Current update is the oldest") - } else { - version = 0 - period = rsrc.period - 1 + if query.Hint == lookup.NoClue { // try to use our cache + entry := h.get(&query.View) + if entry != nil && entry.Epoch.Time <= timeLimit { // avoid bad hints + query.Hint = entry.Epoch + } } - return h.lookup(rsrc, NewLookupParams(rsrc.rootAddr, period, version, params.Limit)) -} -// base code for public lookup methods -func (h *Handler) lookup(rsrc *resource, params *LookupParams) (*resource, error) { - - lp := *params // we can't look for anything without a store if h.chunkStore == nil { return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups") } - var specificperiod bool - if lp.period > 0 { - specificperiod = true - } else { - // get the current time and the next period - now := TimestampProvider.Now() - - var period uint32 - period, err := getNextPeriod(rsrc.StartTime.Time, now.Time, rsrc.Frequency) - if err != nil { - return nil, err - } - lp.period = period - } + var ul ID + ul.View = query.View + var readCount int - // start from the last possible period, and iterate previous ones - // (unless we want a specific period only) until we find a match. - // If we hit startTime we're out of options - var specificversion bool - if lp.version > 0 { - specificversion = true - } else { - lp.version = 1 - } + // Invoke the lookup engine. + // The callback will be called every time the lookup algorithm needs to guess + requestPtr, err := lookup.Lookup(timeLimit, query.Hint, func(epoch lookup.Epoch, now uint64) (interface{}, error) { + readCount++ + ul.Epoch = epoch + ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout) + defer cancel() - var hops uint32 - if lp.Limit == 0 { - lp.Limit = h.queryMaxPeriods - } - log.Trace("resource lookup", "period", lp.period, "version", lp.version, "limit", lp.Limit) - for lp.period > 0 { - if lp.Limit != 0 && hops > lp.Limit { - return nil, NewErrorf(ErrPeriodDepth, "Lookup exceeded max period hops (%d)", lp.Limit) + chunk, err := h.chunkStore.Get(ctx, ul.Addr()) + if err != nil { // TODO: check for catastrophic errors other than chunk not found + return nil, nil } - updateAddr := lp.UpdateAddr() - - ctx, cancel := context.WithTimeout(context.Background(), defaultRetrieveTimeout) - defer cancel() - chunk, err := h.chunkStore.Get(ctx, updateAddr) - if err == nil { - if specificversion { - return h.updateIndex(rsrc, chunk) - } - // check if we have versions > 1. If a version fails, the previous version is used and returned. 
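The read side pairs Lookup, which walks candidate epochs starting from a hint, with GetContent, which returns the data of the last synced update. A corresponding hypothetical sketch, with the same assumptions as publishUpdate above:

```golang
package example

import (
	"context"

	"github.com/ethereum/go-ethereum/swarm/storage/mru"
	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
)

// readLatest looks up the most recent update of a view (lookup.NoClue means
// no hint) and returns its payload from the handler cache.
func readLatest(ctx context.Context, h *mru.Handler, view *mru.View) ([]byte, error) {
	if _, err := h.Lookup(ctx, mru.NewQueryLatest(view, lookup.NoClue)); err != nil {
		return nil, err
	}
	_, data, err := h.GetContent(view)
	return data, err
}
```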
- log.Trace("rsrc update version 1 found, checking for version updates", "period", lp.period, "updateAddr", updateAddr) - for { - newversion := lp.version + 1 - updateAddr := lp.UpdateAddr() - - ctx, cancel := context.WithTimeout(context.Background(), defaultRetrieveTimeout) - defer cancel() - - newchunk, err := h.chunkStore.Get(ctx, updateAddr) - if err != nil { - return h.updateIndex(rsrc, chunk) - } - chunk = newchunk - lp.version = newversion - log.Trace("version update found, checking next", "version", lp.version, "period", lp.period, "updateAddr", updateAddr) - } + var request Request + if err := request.fromChunk(chunk.Address(), chunk.Data()); err != nil { + return nil, nil } - if specificperiod { - break + if request.Time <= timeLimit { + return &request, nil } - log.Trace("rsrc update not found, checking previous period", "period", lp.period, "updateAddr", updateAddr) - lp.period-- - hops++ - } - return nil, NewError(ErrNotFound, "no updates found") -} - -// Load retrieves the Mutable Resource metadata chunk stored at rootAddr -// Upon retrieval it creates/updates the index entry for it with metadata corresponding to the chunk contents -func (h *Handler) Load(ctx context.Context, rootAddr storage.Address) (*resource, error) { - //TODO: Maybe add timeout to context, defaultRetrieveTimeout? - ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout) - defer cancel() - chunk, err := h.chunkStore.Get(ctx, rootAddr) + return nil, nil + }) if err != nil { - return nil, NewError(ErrNotFound, err.Error()) + return nil, err } - // create the index entry - rsrc := &resource{} + log.Info(fmt.Sprintf("Resource lookup finished in %d lookups", readCount)) - if err := rsrc.ResourceMetadata.binaryGet(chunk.Data()); err != nil { // Will fail if this is not really a metadata chunk - return nil, err + request, _ := requestPtr.(*Request) + if request == nil { + return nil, NewError(ErrNotFound, "no updates found") } + return h.updateCache(request) - rsrc.rootAddr, rsrc.metaHash = metadataHash(chunk.Data()) - if !bytes.Equal(rsrc.rootAddr, rootAddr) { - return nil, NewError(ErrCorruptData, "Corrupt metadata chunk") - } - h.set(rootAddr, rsrc) - log.Trace("resource index load", "rootkey", rootAddr, "name", rsrc.ResourceMetadata.Name, "starttime", rsrc.ResourceMetadata.StartTime, "frequency", rsrc.ResourceMetadata.Frequency) - return rsrc, nil } -// update mutable resource index map with specified content -func (h *Handler) updateIndex(rsrc *resource, chunk storage.Chunk) (*resource, error) { +// update mutable resource cache map with specified content +func (h *Handler) updateCache(request *Request) (*cacheEntry, error) { - // retrieve metadata from chunk data and check that it matches this mutable resource - var r SignedResourceUpdate - if err := r.fromChunk(chunk.Address(), chunk.Data()); err != nil { - return nil, err + updateAddr := request.Addr() + log.Trace("resource cache update", "topic", request.Topic.Hex(), "updatekey", updateAddr, "epoch time", request.Epoch.Time, "epoch level", request.Epoch.Level) + + rsrc := h.get(&request.View) + if rsrc == nil { + rsrc = &cacheEntry{} + h.set(&request.View, rsrc) } - log.Trace("resource index update", "name", rsrc.ResourceMetadata.Name, "updatekey", chunk.Address(), "period", r.period, "version", r.version) // update our rsrcs entry map - rsrc.lastKey = chunk.Address() - rsrc.period = r.period - rsrc.version = r.version - rsrc.updated = time.Now() - rsrc.data = make([]byte, len(r.data)) - rsrc.multihash = r.multihash - copy(rsrc.data, r.data) + 
rsrc.lastKey = updateAddr + rsrc.ResourceUpdate = request.ResourceUpdate rsrc.Reader = bytes.NewReader(rsrc.data) - log.Debug("resource synced", "name", rsrc.ResourceMetadata.Name, "updateAddr", chunk.Address(), "period", rsrc.period, "version", rsrc.version) - h.set(chunk.Address(), rsrc) return rsrc, nil } @@ -442,23 +248,16 @@ func (h *Handler) updateIndex(rsrc *resource, chunk storage.Chunk) (*resource, e // Note that a Mutable Resource update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature. An error will be returned if the total length of the chunk payload will exceed this limit. // Update can only check if the caller is trying to overwrite the very last known version, otherwise it just puts the update // on the network. -func (h *Handler) Update(ctx context.Context, r *SignedResourceUpdate) (storage.Address, error) { - return h.update(ctx, r) -} - -// create and commit an update -func (h *Handler) update(ctx context.Context, r *SignedResourceUpdate) (updateAddr storage.Address, err error) { +func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Address, err error) { // we can't update anything without a store if h.chunkStore == nil { return nil, NewError(ErrInit, "Call Handler.SetStore() before updating") } - rsrc := h.get(r.rootAddr) - if rsrc != nil && rsrc.period != 0 && rsrc.version != 0 && // This is the only cheap check we can do for sure - rsrc.period == r.period && rsrc.version >= r.version { // without having to lookup update chunks - - return nil, NewError(ErrInvalidValue, "A former update in this period is already known to exist") + rsrc := h.get(&r.View) + if rsrc != nil && rsrc.Epoch.Equals(r.Epoch) { // This is the only cheap check we can do for sure + return nil, NewError(ErrInvalidValue, "A former update in this epoch is already known to exist") } chunk, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big @@ -468,49 +267,32 @@ func (h *Handler) update(ctx context.Context, r *SignedResourceUpdate) (updateAd // send the chunk h.chunkStore.Put(ctx, chunk) - log.Trace("resource update", "updateAddr", r.updateAddr, "lastperiod", r.period, "version", r.version, "data", chunk.Data(), "multihash", r.multihash) - - // update our resources map entry if the new update is older than the one we have, if we have it. - if rsrc != nil && (r.period > rsrc.period || (rsrc.period == r.period && r.version > rsrc.version)) { - rsrc.period = r.period - rsrc.version = r.version + log.Trace("resource update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", chunk.Data()) + // update our resources map cache entry if the new update is older than the one we have, if we have it. 
+ if rsrc != nil && r.Epoch.After(rsrc.Epoch) { + rsrc.Epoch = r.Epoch rsrc.data = make([]byte, len(r.data)) - rsrc.updated = time.Now() - rsrc.lastKey = r.updateAddr - rsrc.multihash = r.multihash + rsrc.lastKey = r.idAddr copy(rsrc.data, r.data) rsrc.Reader = bytes.NewReader(rsrc.data) } - return r.updateAddr, nil + + return r.idAddr, nil } -// Retrieves the resource index value for the given nameHash -func (h *Handler) get(rootAddr storage.Address) *resource { - if len(rootAddr) < storage.AddressLength { - log.Warn("Handler.get with invalid rootAddr") - return nil - } - hashKey := *(*uint64)(unsafe.Pointer(&rootAddr[0])) +// Retrieves the resource cache value for the given nameHash +func (h *Handler) get(view *View) *cacheEntry { + mapKey := view.mapKey() h.resourceLock.RLock() defer h.resourceLock.RUnlock() - rsrc := h.resources[hashKey] + rsrc := h.resources[mapKey] return rsrc } -// Sets the resource index value for the given nameHash -func (h *Handler) set(rootAddr storage.Address, rsrc *resource) { - if len(rootAddr) < storage.AddressLength { - log.Warn("Handler.set with invalid rootAddr") - return - } - hashKey := *(*uint64)(unsafe.Pointer(&rootAddr[0])) +// Sets the resource cache value for the given View +func (h *Handler) set(view *View, rsrc *cacheEntry) { + mapKey := view.mapKey() h.resourceLock.Lock() defer h.resourceLock.Unlock() - h.resources[hashKey] = rsrc -} - -// Checks if we already have an update on this resource, according to the value in the current state of the resource index -func (h *Handler) hasUpdate(rootAddr storage.Address, period uint32) bool { - rsrc := h.get(rootAddr) - return rsrc != nil && rsrc.period == period + h.resources[mapKey] = rsrc } diff --git a/swarm/storage/mru/handler_test.go b/swarm/storage/mru/handler_test.go new file mode 100644 index 000000000..13eb9e51b --- /dev/null +++ b/swarm/storage/mru/handler_test.go @@ -0,0 +1,520 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
+ +package mru + +import ( + "bytes" + "context" + "flag" + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/ethereum/go-ethereum/crypto" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/swarm/chunk" + "github.com/ethereum/go-ethereum/swarm/storage" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" +) + +var ( + loglevel = flag.Int("loglevel", 3, "loglevel") + startTime = Timestamp{ + Time: uint64(4200), + } + cleanF func() + resourceName = "føø.bar" + hashfunc = storage.MakeHashFunc(storage.DefaultHash) +) + +func init() { + flag.Parse() + log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))) +} + +// simulated timeProvider +type fakeTimeProvider struct { + currentTime uint64 +} + +func (f *fakeTimeProvider) Tick() { + f.currentTime++ +} + +func (f *fakeTimeProvider) Set(time uint64) { + f.currentTime = time +} + +func (f *fakeTimeProvider) FastForward(offset uint64) { + f.currentTime += offset +} + +func (f *fakeTimeProvider) Now() Timestamp { + return Timestamp{ + Time: f.currentTime, + } +} + +// make updates and retrieve them based on periods and versions +func TestResourceHandler(t *testing.T) { + + // make fake timeProvider + clock := &fakeTimeProvider{ + currentTime: startTime.Time, // clock starts at t=4200 + } + + // signer containing private key + signer := newAliceSigner() + + rh, datadir, teardownTest, err := setupTest(clock, signer) + if err != nil { + t.Fatal(err) + } + defer teardownTest() + + // create a new resource + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + topic, _ := NewTopic("Mess with mru code and see what ghost catches you", nil) + view := View{ + Topic: topic, + User: signer.Address(), + } + + // data for updates: + updates := []string{ + "blinky", // t=4200 + "pinky", // t=4242 + "inky", // t=4284 + "clyde", // t=4285 + } + + request := NewFirstRequest(view.Topic) // this timestamps the update at t = 4200 (start time) + resourcekey := make(map[string]storage.Address) + data := []byte(updates[0]) + request.SetData(data) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + resourcekey[updates[0]], err = rh.Update(ctx, request) + if err != nil { + t.Fatal(err) + } + + // move the clock ahead 21 seconds + clock.FastForward(21) // t=4221 + + request, err = rh.NewRequest(ctx, &request.View) // this timestamps the update at t = 4221 + if err != nil { + t.Fatal(err) + } + if request.Epoch.Base() != 0 || request.Epoch.Level != lookup.HighestLevel-1 { + t.Fatalf("Suggested epoch BaseTime should be 0 and Epoch level should be %d", lookup.HighestLevel-1) + } + + request.Epoch.Level = lookup.HighestLevel // force level 25 instead of 24 to make it fail + data = []byte(updates[1]) + request.SetData(data) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + resourcekey[updates[1]], err = rh.Update(ctx, request) + if err == nil { + t.Fatal("Expected update to fail since an update in this epoch already exists") + } + + // move the clock ahead 21 seconds + clock.FastForward(21) // t=4242 + request, err = rh.NewRequest(ctx, &request.View) + if err != nil { + t.Fatal(err) + } + request.SetData(data) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + resourcekey[updates[1]], err = rh.Update(ctx, request) + if err != nil { + t.Fatal(err) + } + + // move the clock ahead 42 seconds + clock.FastForward(42) // t=4284 + request, err = rh.NewRequest(ctx, &request.View) + 
if err != nil { + t.Fatal(err) + } + data = []byte(updates[2]) + request.SetData(data) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + resourcekey[updates[2]], err = rh.Update(ctx, request) + if err != nil { + t.Fatal(err) + } + + // move the clock ahead 1 second + clock.FastForward(1) // t=4285 + request, err = rh.NewRequest(ctx, &request.View) + if err != nil { + t.Fatal(err) + } + if request.Epoch.Base() != 0 || request.Epoch.Level != 22 { + t.Fatalf("Expected epoch base time to be %d, got %d. Expected epoch level to be %d, got %d", 0, request.Epoch.Base(), 22, request.Epoch.Level) + } + data = []byte(updates[3]) + request.SetData(data) + + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + resourcekey[updates[3]], err = rh.Update(ctx, request) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Second) + rh.Close() + + // check we can retrieve the updates after close + clock.FastForward(2000) // t=6285 + + rhparams := &HandlerParams{} + + rh2, err := NewTestHandler(datadir, rhparams) + if err != nil { + t.Fatal(err) + } + + rsrc2, err := rh2.Lookup(ctx, NewQueryLatest(&request.View, lookup.NoClue)) + if err != nil { + t.Fatal(err) + } + + // last update should be "clyde" + if !bytes.Equal(rsrc2.data, []byte(updates[len(updates)-1])) { + t.Fatalf("resource data was %v, expected %v", string(rsrc2.data), updates[len(updates)-1]) + } + if rsrc2.Level != 22 { + t.Fatalf("resource epoch level was %d, expected 22", rsrc2.Level) + } + if rsrc2.Base() != 0 { + t.Fatalf("resource epoch base time was %d, expected 0", rsrc2.Base()) + } + log.Debug("Latest lookup", "epoch base time", rsrc2.Base(), "epoch level", rsrc2.Level, "data", rsrc2.data) + + // specific point in time + rsrc, err := rh2.Lookup(ctx, NewQuery(&request.View, 4284, lookup.NoClue)) + if err != nil { + t.Fatal(err) + } + // check data + if !bytes.Equal(rsrc.data, []byte(updates[2])) { + t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc2.data), updates[2]) + } + log.Debug("Historical lookup", "epoch base time", rsrc2.Base(), "epoch level", rsrc2.Level, "data", rsrc2.data) + + // beyond the first should yield an error + rsrc, err = rh2.Lookup(ctx, NewQuery(&request.View, startTime.Time-1, lookup.NoClue)) + if err == nil { + t.Fatalf("expected previous to fail, returned epoch %s data %v", rsrc.Epoch.String(), rsrc.data) + } + +} + +const Day = 60 * 60 * 24 +const Year = Day * 365 +const Month = Day * 30 + +func generateData(x uint64) []byte { + return []byte(fmt.Sprintf("%d", x)) +} + +func TestSparseUpdates(t *testing.T) { + + // make fake timeProvider + timeProvider := &fakeTimeProvider{ + currentTime: startTime.Time, + } + + // signer containing private key + signer := newAliceSigner() + + rh, datadir, teardownTest, err := setupTest(timeProvider, signer) + if err != nil { + t.Fatal(err) + } + defer teardownTest() + defer os.RemoveAll(datadir) + + // create a new resource + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + topic, _ := NewTopic("Very slow updates", nil) + view := View{ + Topic: topic, + User: signer.Address(), + } + + // publish one update every 5 years since Unix 0 until today + today := uint64(1533799046) + var epoch lookup.Epoch + var lastUpdateTime uint64 + for T := uint64(0); T < today; T += 5 * Year { + request := NewFirstRequest(view.Topic) + request.Epoch = lookup.GetNextEpoch(epoch, T) + request.data = generateData(T) // this generates some data that depends on T, so we can check later + request.Sign(signer) + if err != nil 
{ + t.Fatal(err) + } + + if _, err := rh.Update(ctx, request); err != nil { + t.Fatal(err) + } + epoch = request.Epoch + lastUpdateTime = T + } + + query := NewQuery(&view, today, lookup.NoClue) + + _, err = rh.Lookup(ctx, query) + if err != nil { + t.Fatal(err) + } + + _, content, err := rh.GetContent(&view) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(generateData(lastUpdateTime), content) { + t.Fatalf("Expected to recover last written value %d, got %s", lastUpdateTime, string(content)) + } + + // lookup the closest update to 35*Year + 6* Month (~ June 2005): + // it should find the update we put on 35*Year, since we were updating every 5 years. + + query.TimeLimit = 35*Year + 6*Month + + _, err = rh.Lookup(ctx, query) + if err != nil { + t.Fatal(err) + } + + _, content, err = rh.GetContent(&view) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(generateData(35*Year), content) { + t.Fatalf("Expected to recover %d, got %s", 35*Year, string(content)) + } +} + +func TestValidator(t *testing.T) { + + // make fake timeProvider + timeProvider := &fakeTimeProvider{ + currentTime: startTime.Time, + } + + // signer containing private key. Alice will be the good girl + signer := newAliceSigner() + + // set up sim timeProvider + rh, _, teardownTest, err := setupTest(timeProvider, signer) + if err != nil { + t.Fatal(err) + } + defer teardownTest() + + // create new resource + topic, _ := NewTopic(resourceName, nil) + view := View{ + Topic: topic, + User: signer.Address(), + } + mr := NewFirstRequest(view.Topic) + + // chunk with address + data := []byte("foo") + mr.SetData(data) + if err := mr.Sign(signer); err != nil { + t.Fatalf("sign fail: %v", err) + } + + chunk, err := mr.toChunk() + if err != nil { + t.Fatal(err) + } + if !rh.Validate(chunk.Address(), chunk.Data()) { + t.Fatal("Chunk validator fail on update chunk") + } + + address := chunk.Address() + // mess with the address + address[0] = 11 + address[15] = 99 + + if rh.Validate(address, chunk.Data()) { + t.Fatal("Expected Validate to fail with false chunk address") + } +} + +// tests that the content address validator correctly checks the data +// tests that resource update chunks are passed through content address validator +// there is some redundancy in this test as it also tests content addressed chunks, +// which should be evaluated as invalid chunks by this validator +func TestValidatorInStore(t *testing.T) { + + // make fake timeProvider + TimestampProvider = &fakeTimeProvider{ + currentTime: startTime.Time, + } + + // signer containing private key + signer := newAliceSigner() + + // set up localstore + datadir, err := ioutil.TempDir("", "storage-testresourcevalidator") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(datadir) + + handlerParams := storage.NewDefaultLocalStoreParams() + handlerParams.Init(datadir) + store, err := storage.NewLocalStore(handlerParams, nil) + if err != nil { + t.Fatal(err) + } + + // set up resource handler and add is as a validator to the localstore + rhParams := &HandlerParams{} + rh := NewHandler(rhParams) + store.Validators = append(store.Validators, rh) + + // create content addressed chunks, one good, one faulty + chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2) + goodChunk := chunks[0] + badChunk := storage.NewChunk(chunks[1].Address(), goodChunk.Data()) + + topic, _ := NewTopic("xyzzy", nil) + view := View{ + Topic: topic, + User: signer.Address(), + } + + // create a resource update chunk with correct publickey + id := ID{ + Epoch: lookup.Epoch{Time: 42, 
+ Level: 1, + }, + View: view, + } + + updateAddr := id.Addr() + data := []byte("bar") + + r := new(Request) + r.idAddr = updateAddr + r.ResourceUpdate.ID = id + r.data = data + + r.Sign(signer) + + uglyChunk, err := r.toChunk() + if err != nil { + t.Fatal(err) + } + + // put the chunks in the store and check their error status + err = store.Put(context.Background(), goodChunk) + if err == nil { + t.Fatal("expected error on good content address chunk with resource validator only, but got nil") + } + err = store.Put(context.Background(), badChunk) + if err == nil { + t.Fatal("expected error on bad content address chunk with resource validator only, but got nil") + } + err = store.Put(context.Background(), uglyChunk) + if err != nil { + t.Fatalf("expected no error on resource update chunk with resource validator only, but got: %s", err) + } +} + +// create rpc and resourcehandler +func setupTest(timeProvider timestampProvider, signer Signer) (rh *TestHandler, datadir string, teardown func(), err error) { + + var fsClean func() + var rpcClean func() + cleanF = func() { + if fsClean != nil { + fsClean() + } + if rpcClean != nil { + rpcClean() + } + } + + // temp datadir + datadir, err = ioutil.TempDir("", "rh") + if err != nil { + return nil, "", nil, err + } + fsClean = func() { + os.RemoveAll(datadir) + } + + TimestampProvider = timeProvider + rhparams := &HandlerParams{} + rh, err = NewTestHandler(datadir, rhparams) + return rh, datadir, cleanF, err +} + +func newAliceSigner() *GenericSigner { + privKey, _ := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") + return NewGenericSigner(privKey) +} + +func newBobSigner() *GenericSigner { + privKey, _ := crypto.HexToECDSA("accedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedecaca") + return NewGenericSigner(privKey) +} + +func newCharlieSigner() *GenericSigner { + privKey, _ := crypto.HexToECDSA("facadefacadefacadefacadefacadefacadefacadefacadefacadefacadefaca") + return NewGenericSigner(privKey) +} + +func getUpdateDirect(rh *Handler, addr storage.Address) ([]byte, error) { + chunk, err := rh.chunkStore.Get(context.TODO(), addr) + if err != nil { + return nil, err + } + var r Request + if err := r.fromChunk(addr, chunk.Data()); err != nil { + return nil, err + } + return r.data, nil +} diff --git a/swarm/storage/mru/id.go b/swarm/storage/mru/id.go new file mode 100644 index 000000000..f008169ed --- /dev/null +++ b/swarm/storage/mru/id.go @@ -0,0 +1,123 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
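For orientation, the happy path these tests drive can be condensed into a few calls. The sketch below is illustrative only: it assumes a handler `rh`, a signer `signer` and a context `ctx` set up as in `setupTest` above, and the helper name `exampleUpdateAndLookup` and the topic string are made up for the example.

```golang
// Illustrative sketch, in-package with the tests above: publish one update and read it back.
func exampleUpdateAndLookup(ctx context.Context, rh *TestHandler, signer Signer) ([]byte, error) {
	topic, _ := NewTopic("example resource", nil)
	view := View{Topic: topic, User: signer.Address()}

	// First update: NewFirstRequest picks the starting epoch from the current time.
	request := NewFirstRequest(view.Topic)
	request.SetData([]byte("hello"))
	if err := request.Sign(signer); err != nil {
		return nil, err
	}
	if _, err := rh.Update(ctx, request); err != nil {
		return nil, err
	}

	// Retrieve the latest version with no hint about where the last update landed.
	if _, err := rh.Lookup(ctx, NewQueryLatest(&view, lookup.NoClue)); err != nil {
		return nil, err
	}
	_, content, err := rh.GetContent(&view)
	return content, err
}
```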
+ +package mru + +import ( + "fmt" + "hash" + "strconv" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" + + "github.com/ethereum/go-ethereum/swarm/storage" +) + +// ID uniquely identifies an update on the network. +type ID struct { + View `json:"view"` + lookup.Epoch `json:"epoch"` +} + +// ID layout: +// View viewLength bytes +// Epoch EpochLength +const idLength = viewLength + lookup.EpochLength + +// Addr calculates the resource update chunk address corresponding to this ID +func (u *ID) Addr() (updateAddr storage.Address) { + serializedData := make([]byte, idLength) + var cursor int + u.View.binaryPut(serializedData[cursor : cursor+viewLength]) + cursor += viewLength + + eid := u.Epoch.ID() + copy(serializedData[cursor:cursor+lookup.EpochLength], eid[:]) + + hasher := hashPool.Get().(hash.Hash) + defer hashPool.Put(hasher) + hasher.Reset() + hasher.Write(serializedData) + return hasher.Sum(nil) +} + +// binaryPut serializes this instance into the provided slice +func (u *ID) binaryPut(serializedData []byte) error { + if len(serializedData) != idLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize ID. Expected %d, got %d", idLength, len(serializedData)) + } + var cursor int + if err := u.View.binaryPut(serializedData[cursor : cursor+viewLength]); err != nil { + return err + } + cursor += viewLength + + epochBytes, err := u.Epoch.MarshalBinary() + if err != nil { + return err + } + copy(serializedData[cursor:cursor+lookup.EpochLength], epochBytes[:]) + cursor += lookup.EpochLength + + return nil +} + +// binaryLength returns the expected size of this structure when serialized +func (u *ID) binaryLength() int { + return idLength +} + +// binaryGet restores the current instance from the information contained in the passed slice +func (u *ID) binaryGet(serializedData []byte) error { + if len(serializedData) != idLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to read ID. 
Expected %d, got %d", idLength, len(serializedData)) + } + + var cursor int + if err := u.View.binaryGet(serializedData[cursor : cursor+viewLength]); err != nil { + return err + } + cursor += viewLength + + if err := u.Epoch.UnmarshalBinary(serializedData[cursor : cursor+lookup.EpochLength]); err != nil { + return err + } + cursor += lookup.EpochLength + + return nil +} + +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (u *ID) FromValues(values Values) error { + level, _ := strconv.ParseUint(values.Get("level"), 10, 32) + u.Epoch.Level = uint8(level) + u.Epoch.Time, _ = strconv.ParseUint(values.Get("time"), 10, 64) + + if u.View.User == (common.Address{}) { + return u.View.FromValues(values) + } + return nil +} + +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (u *ID) AppendValues(values Values) { + values.Set("level", fmt.Sprintf("%d", u.Epoch.Level)) + values.Set("time", fmt.Sprintf("%d", u.Epoch.Time)) + u.View.AppendValues(values) +} diff --git a/swarm/storage/mru/id_test.go b/swarm/storage/mru/id_test.go new file mode 100644 index 000000000..eba58fbf3 --- /dev/null +++ b/swarm/storage/mru/id_test.go @@ -0,0 +1,28 @@ +package mru + +import ( + "testing" + + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" +) + +func getTestID() *ID { + return &ID{ + View: *getTestView(), + Epoch: lookup.GetFirstEpoch(1000), + } +} + +func TestIDAddr(t *testing.T) { + ul := getTestID() + updateAddr := ul.Addr() + compareByteSliceToExpectedHex(t, "updateAddr", updateAddr, "0x8b24583ec293e085f4c78aaee66d1bc5abfb8b4233304d14a349afa57af2a783") +} + +func TestIDSerializer(t *testing.T) { + testBinarySerializerRecovery(t, getTestID(), "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019") +} + +func TestIDLengthCheck(t *testing.T) { + testBinarySerializerLengthCheck(t, getTestID()) +} diff --git a/swarm/storage/mru/lookup.go b/swarm/storage/mru/lookup.go deleted file mode 100644 index b52cd5b4f..000000000 --- a/swarm/storage/mru/lookup.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
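The `ID` type above (a `View` plus a `lookup.Epoch`) supersedes the period/version/rootAddr-based `UpdateLookup` deleted below: an update chunk is now addressed purely by who publishes it, under which topic, and in which epoch. A hedged usage sketch, assuming a populated `View` value `view` as built in the tests earlier in this diff:

```golang
// Illustrative only: derive the chunk address of a concrete update slot.
// The epoch here is simply the first epoch for the current wall-clock time.
id := ID{
	View:  view,
	Epoch: lookup.GetNextEpoch(lookup.NoClue, uint64(time.Now().Unix())),
}
updateAddr := id.Addr() // storage.Address under which that update would be stored
_ = updateAddr
```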
- -package mru - -import ( - "encoding/binary" - "hash" - - "github.com/ethereum/go-ethereum/swarm/storage" -) - -// LookupParams is used to specify constraints when performing an update lookup -// Limit defines whether or not the lookup should be limited -// If Limit is set to true then Max defines the amount of hops that can be performed -type LookupParams struct { - UpdateLookup - Limit uint32 -} - -// RootAddr returns the metadata chunk address -func (r *LookupParams) RootAddr() storage.Address { - return r.rootAddr -} - -func NewLookupParams(rootAddr storage.Address, period, version uint32, limit uint32) *LookupParams { - return &LookupParams{ - UpdateLookup: UpdateLookup{ - period: period, - version: version, - rootAddr: rootAddr, - }, - Limit: limit, - } -} - -// LookupLatest generates lookup parameters that look for the latest version of a resource -func LookupLatest(rootAddr storage.Address) *LookupParams { - return NewLookupParams(rootAddr, 0, 0, 0) -} - -// LookupLatestVersionInPeriod generates lookup parameters that look for the latest version of a resource in a given period -func LookupLatestVersionInPeriod(rootAddr storage.Address, period uint32) *LookupParams { - return NewLookupParams(rootAddr, period, 0, 0) -} - -// LookupVersion generates lookup parameters that look for a specific version of a resource -func LookupVersion(rootAddr storage.Address, period, version uint32) *LookupParams { - return NewLookupParams(rootAddr, period, version, 0) -} - -// UpdateLookup represents the components of a resource update search key -type UpdateLookup struct { - period uint32 - version uint32 - rootAddr storage.Address -} - -// 4 bytes period -// 4 bytes version -// storage.Keylength for rootAddr -const updateLookupLength = 4 + 4 + storage.AddressLength - -// UpdateAddr calculates the resource update chunk address corresponding to this lookup key -func (u *UpdateLookup) UpdateAddr() (updateAddr storage.Address) { - serializedData := make([]byte, updateLookupLength) - u.binaryPut(serializedData) - hasher := hashPool.Get().(hash.Hash) - defer hashPool.Put(hasher) - hasher.Reset() - hasher.Write(serializedData) - return hasher.Sum(nil) -} - -// binaryPut serializes this UpdateLookup instance into the provided slice -func (u *UpdateLookup) binaryPut(serializedData []byte) error { - if len(serializedData) != updateLookupLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize UpdateLookup. Expected %d, got %d", updateLookupLength, len(serializedData)) - } - if len(u.rootAddr) != storage.AddressLength { - return NewError(ErrInvalidValue, "UpdateLookup.binaryPut called without rootAddr set") - } - binary.LittleEndian.PutUint32(serializedData[:4], u.period) - binary.LittleEndian.PutUint32(serializedData[4:8], u.version) - copy(serializedData[8:], u.rootAddr[:]) - return nil -} - -// binaryLength returns the expected size of this structure when serialized -func (u *UpdateLookup) binaryLength() int { - return updateLookupLength -} - -// binaryGet restores the current instance from the information contained in the passed slice -func (u *UpdateLookup) binaryGet(serializedData []byte) error { - if len(serializedData) != updateLookupLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to read UpdateLookup. 
Expected %d, got %d", updateLookupLength, len(serializedData)) - } - u.period = binary.LittleEndian.Uint32(serializedData[:4]) - u.version = binary.LittleEndian.Uint32(serializedData[4:8]) - u.rootAddr = storage.Address(make([]byte, storage.AddressLength)) - copy(u.rootAddr[:], serializedData[8:]) - return nil -} diff --git a/swarm/storage/mru/lookup/epoch.go b/swarm/storage/mru/lookup/epoch.go new file mode 100644 index 000000000..bafe95477 --- /dev/null +++ b/swarm/storage/mru/lookup/epoch.go @@ -0,0 +1,91 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package lookup + +import ( + "encoding/binary" + "errors" + "fmt" +) + +// Epoch represents a time slot at a particular frequency level +type Epoch struct { + Time uint64 `json:"time"` // Time stores the time at which the update or lookup takes place + Level uint8 `json:"level"` // Level indicates the frequency level as the exponent of a power of 2 +} + +// EpochID is a unique identifier for an Epoch, based on its level and base time. +type EpochID [8]byte + +// EpochLength stores the serialized binary length of an Epoch +const EpochLength = 8 + +// MaxTime contains the highest possible time value an Epoch can handle +const MaxTime uint64 = (1 << 56) - 1 + +// Base returns the base time of the Epoch +func (e *Epoch) Base() uint64 { + return getBaseTime(e.Time, e.Level) +} + +// ID Returns the unique identifier of this epoch +func (e *Epoch) ID() EpochID { + base := e.Base() + var id EpochID + binary.LittleEndian.PutUint64(id[:], base) + id[7] = e.Level + return id +} + +// MarshalBinary implements the encoding.BinaryMarshaller interface +func (e *Epoch) MarshalBinary() (data []byte, err error) { + b := make([]byte, 8) + binary.LittleEndian.PutUint64(b[:], e.Time) + b[7] = e.Level + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaller interface +func (e *Epoch) UnmarshalBinary(data []byte) error { + if len(data) != EpochLength { + return errors.New("Invalid data unmarshalling Epoch") + } + b := make([]byte, 8) + copy(b, data) + e.Level = b[7] + b[7] = 0 + e.Time = binary.LittleEndian.Uint64(b) + return nil +} + +// After returns true if this epoch occurs later or exactly at the other epoch. +func (e *Epoch) After(epoch Epoch) bool { + if e.Time == epoch.Time { + return e.Level < epoch.Level + } + return e.Time >= epoch.Time +} + +// Equals compares two epochs and returns true if they refer to the same time period. +func (e *Epoch) Equals(epoch Epoch) bool { + return e.Level == epoch.Level && e.Base() == epoch.Base() +} + +// String implements the Stringer interface. 
+func (e *Epoch) String() string { + return fmt.Sprintf("Epoch{Time:%d, Level:%d}", e.Time, e.Level) +} diff --git a/swarm/storage/mru/lookup/epoch_test.go b/swarm/storage/mru/lookup/epoch_test.go new file mode 100644 index 000000000..62cf5523d --- /dev/null +++ b/swarm/storage/mru/lookup/epoch_test.go @@ -0,0 +1,57 @@ +package lookup_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" +) + +func TestMarshallers(t *testing.T) { + + for i := uint64(1); i < lookup.MaxTime; i *= 3 { + e := lookup.Epoch{ + Time: i, + Level: uint8(i % 20), + } + b, err := e.MarshalBinary() + if err != nil { + t.Fatal(err) + } + var e2 lookup.Epoch + if err := e2.UnmarshalBinary(b); err != nil { + t.Fatal(err) + } + if e != e2 { + t.Fatal("Expected unmarshalled epoch to be equal to marshalled one") + } + } + +} + +func TestAfter(t *testing.T) { + a := lookup.Epoch{ + Time: 5, + Level: 3, + } + b := lookup.Epoch{ + Time: 6, + Level: 3, + } + c := lookup.Epoch{ + Time: 6, + Level: 4, + } + + if !b.After(a) { + t.Fatal("Expected 'after' to be true, got false") + } + + if b.After(b) { + t.Fatal("Expected 'after' to be false when both epochs are identical, got true") + } + + if !b.After(c) { + t.Fatal("Expected 'after' to be true when both epochs have the same time but the level is lower in the first one, but got false") + } + +} diff --git a/swarm/storage/mru/lookup/lookup.go b/swarm/storage/mru/lookup/lookup.go new file mode 100644 index 000000000..c98248d70 --- /dev/null +++ b/swarm/storage/mru/lookup/lookup.go @@ -0,0 +1,180 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +/* +Package lookup defines resource lookup algorithms and provides tools to place updates +so they can be found +*/ +package lookup + +const maxuint64 = ^uint64(0) + +// LowestLevel establishes the frequency resolution of the lookup algorithm as a power of 2. +const LowestLevel uint8 = 0 // default is 0 (1 second) + +// HighestLevel sets the lowest frequency the algorithm will operate at, as a power of 2. +// 25 -> 2^25 equals to roughly one year.
+const HighestLevel = 25 // default is 25 (~1 year) + +// DefaultLevel sets what level will be chosen to search when there is no hint +const DefaultLevel = HighestLevel + +//Algorithm is the function signature of a lookup algorithm +type Algorithm func(now uint64, hint Epoch, read ReadFunc) (value interface{}, err error) + +// Lookup finds the update with the highest timestamp that is smaller or equal than 'now' +// It takes a hint which should be the epoch where the last known update was +// If you don't know in what epoch the last update happened, simply submit lookup.NoClue +// read() will be called on each lookup attempt +// Returns an error only if read() returns an error +// Returns nil if an update was not found +var Lookup Algorithm = FluzCapacitorAlgorithm + +// ReadFunc is a handler called by Lookup each time it attempts to find a value +// It should return <nil> if a value is not found +// It should return <nil> if a value is found, but its timestamp is higher than "now" +// It should only return an error in case the handler wants to stop the +// lookup process entirely. +type ReadFunc func(epoch Epoch, now uint64) (interface{}, error) + +// NoClue is a hint that can be provided when the Lookup caller does not have +// a clue about where the last update may be +var NoClue = Epoch{} + +// getBaseTime returns the epoch base time of the given +// time and level +func getBaseTime(t uint64, level uint8) uint64 { + return t & (maxuint64 << level) +} + +// Hint creates a hint based only on the last known update time +func Hint(last uint64) Epoch { + return Epoch{ + Time: last, + Level: DefaultLevel, + } +} + +// GetNextLevel returns the frequency level a next update should be placed at, provided where +// the last update was and what time it is now. +// This is the first nonzero bit of the XOR of 'last' and 'now', counting from the highest significant bit +// but limited to not return a level that is smaller than the last-1 +func GetNextLevel(last Epoch, now uint64) uint8 { + // First XOR the last epoch base time with the current clock. + // This will set all the common most significant bits to zero. + mix := (last.Base() ^ now) + + // Then, make sure we stop the below loop before one level below the current, by setting + // that level's bit to 1. + // If the next level is lower than the current one, it must be exactly level-1 and not lower. + mix |= (1 << (last.Level - 1)) + + // if the last update was more than 2^highestLevel seconds ago, choose the highest level + if mix > (maxuint64 >> (64 - HighestLevel - 1)) { + return HighestLevel + } + + // set up a mask to scan for nonzero bits, starting at the highest level + mask := uint64(1 << (HighestLevel)) + + for i := uint8(HighestLevel); i > LowestLevel; i-- { + if mix&mask != 0 { // if we find a nonzero bit, this is the level the next update should be at. + return i + } + mask = mask >> 1 // move our bit one position to the right + } + return 0 +} + +// GetNextEpoch returns the epoch where the next update should be located +// according to where the previous update was +// and what time it is now. +func GetNextEpoch(last Epoch, now uint64) Epoch { + if last == NoClue { + return GetFirstEpoch(now) + } + level := GetNextLevel(last, now) + return Epoch{ + Level: level, + Time: now, + } +} + +// GetFirstEpoch returns the epoch where the first update should be located +// based on what time it is now. 
+func GetFirstEpoch(now uint64) Epoch { + return Epoch{Level: HighestLevel, Time: now} +} + +var worstHint = Epoch{Time: 0, Level: 63} + +// FluzCapacitorAlgorithm works by narrowing the epoch search area if an update is found +// going back and forth in time +// First, it will attempt to find an update where it should be now if the hint was +// really the last update. If that lookup fails, then the last update must be either the hint itself +// or the epochs right below. If however, that lookup succeeds, then the update must be +// that one or within the epochs right below. +// see the guide for a more graphical representation +func FluzCapacitorAlgorithm(now uint64, hint Epoch, read ReadFunc) (value interface{}, err error) { + var lastFound interface{} + var epoch Epoch + if hint == NoClue { + hint = worstHint + } + + t := now + + for { + epoch = GetNextEpoch(hint, t) + value, err = read(epoch, now) + if err != nil { + return nil, err + } + if value != nil { + lastFound = value + if epoch.Level == LowestLevel || epoch.Equals(hint) { + return value, nil + } + hint = epoch + continue + } + if epoch.Base() == hint.Base() { + if lastFound != nil { + return lastFound, nil + } + // we have reached the hint itself + if hint == worstHint { + return nil, nil + } + // check it out + value, err = read(hint, now) + if err != nil { + return nil, err + } + if value != nil { + return value, nil + } + // bad hint. + epoch = hint + hint = worstHint + } + base := epoch.Base() + if base == 0 { + return nil, nil + } + t = base - 1 + } +} diff --git a/swarm/storage/mru/lookup/lookup_test.go b/swarm/storage/mru/lookup/lookup_test.go new file mode 100644 index 000000000..34bcb61f0 --- /dev/null +++ b/swarm/storage/mru/lookup/lookup_test.go @@ -0,0 +1,414 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
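To make the epoch grid concrete: an epoch at level L spans 2^L seconds, and its base time is the update time with the lowest L bits masked off (`getBaseTime` above); `GetNextLevel` then picks the highest bit in which the last epoch's base and the current time differ. The following self-contained sketch is illustrative only; `baseTime` is a local copy of the masking step, and the timestamp is the one used by the handler tests earlier in this diff.

```golang
package main

import "fmt"

// baseTime mirrors getBaseTime in lookup.go: clear the lowest `level` bits of t.
func baseTime(t uint64, level uint8) uint64 {
	return t & (^uint64(0) << level)
}

func main() {
	t := uint64(4285) // timestamp used by the handler tests earlier in this diff
	for _, level := range []uint8{0, 5, 12, 22, 25} {
		fmt.Printf("level %2d: base %7d (slot of %8d s)\n", level, baseTime(t, level), uint64(1)<<level)
	}
	// level 22 yields base 0, matching the epoch (base 0, level 22) those tests expect at t=4285.
}
```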
+ +package lookup_test + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" +) + +type Data struct { + Payload uint64 + Time uint64 +} + +type Store map[lookup.EpochID]*Data + +func write(store Store, epoch lookup.Epoch, value *Data) { + log.Debug("Write: %d-%d, value='%d'\n", epoch.Base(), epoch.Level, value.Payload) + store[epoch.ID()] = value +} + +func update(store Store, last lookup.Epoch, now uint64, value *Data) lookup.Epoch { + epoch := lookup.GetNextEpoch(last, now) + + write(store, epoch, value) + + return epoch +} + +const Day = 60 * 60 * 24 +const Year = Day * 365 +const Month = Day * 30 + +func makeReadFunc(store Store, counter *int) lookup.ReadFunc { + return func(epoch lookup.Epoch, now uint64) (interface{}, error) { + *counter++ + data := store[epoch.ID()] + var valueStr string + if data != nil { + valueStr = fmt.Sprintf("%d", data.Payload) + } + log.Debug("Read: %d-%d, value='%s'\n", epoch.Base(), epoch.Level, valueStr) + if data != nil && data.Time <= now { + return data, nil + } + return nil, nil + } +} + +func TestLookup(t *testing.T) { + + store := make(Store) + readCount := 0 + readFunc := makeReadFunc(store, &readCount) + + // write an update every month for 12 months 3 years ago and then silence for two years + now := uint64(1533799046) + var epoch lookup.Epoch + + var lastData *Data + for i := uint64(0); i < 12; i++ { + t := uint64(now - Year*3 + i*Month) + data := Data{ + Payload: t, //our "payload" will be the timestamp itself. + Time: t, + } + epoch = update(store, epoch, t, &data) + lastData = &data + } + + // try to get the last value + + value, err := lookup.Lookup(now, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + + readCountWithoutHint := readCount + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + // reset the read count for the next test + readCount = 0 + // Provide a hint to get a faster lookup. In particular, we give the exact location of the last update + value, err = lookup.Lookup(now, epoch, readFunc) + if err != nil { + t.Fatal(err) + } + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + if readCount > readCountWithoutHint { + t.Fatalf("Expected lookup to complete with fewer or same reads than %d since we provided a hint. Did %d reads.", readCountWithoutHint, readCount) + } + + // try to get an intermediate value + // if we look for a value in now - Year*3 + 6*Month, we should get that value + // Since the "payload" is the timestamp itself, we can check this. + + expectedTime := now - Year*3 + 6*Month + + value, err = lookup.Lookup(expectedTime, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + + data, ok := value.(*Data) + + if !ok { + t.Fatal("Expected value to contain data") + } + + if data.Time != expectedTime { + t.Fatalf("Expected value timestamp to be %d, got %d", data.Time, expectedTime) + } + +} + +func TestOneUpdateAt0(t *testing.T) { + + store := make(Store) + readCount := 0 + + readFunc := makeReadFunc(store, &readCount) + now := uint64(1533903729) + + var epoch lookup.Epoch + data := Data{ + Payload: 79, + Time: 0, + } + update(store, epoch, 0, &data) + + value, err := lookup.Lookup(now, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + if value != &data { + t.Fatalf("Expected lookup to return the last written value: %v. 
Got %v", data, value) + } +} + +// Tests the update is found even when a bad hint is given +func TestBadHint(t *testing.T) { + + store := make(Store) + readCount := 0 + + readFunc := makeReadFunc(store, &readCount) + now := uint64(1533903729) + + var epoch lookup.Epoch + data := Data{ + Payload: 79, + Time: 0, + } + + // place an update for t=1200 + update(store, epoch, 1200, &data) + + // come up with some evil hint + badHint := lookup.Epoch{ + Level: 18, + Time: 1200000000, + } + + value, err := lookup.Lookup(now, badHint, readFunc) + if err != nil { + t.Fatal(err) + } + if value != &data { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", data, value) + } +} + +func TestLookupFail(t *testing.T) { + + store := make(Store) + readCount := 0 + + readFunc := makeReadFunc(store, &readCount) + now := uint64(1533903729) + + // don't write anything and try to look up. + // we're testing we don't get stuck in a loop + + value, err := lookup.Lookup(now, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + if value != nil { + t.Fatal("Expected value to be nil, since the update should've failed") + } + + expectedReads := now/(1<<lookup.HighestLevel) + 1 + if uint64(readCount) != expectedReads { + t.Fatalf("Expected lookup to fail after %d reads. Did %d reads.", expectedReads, readCount) + } +} + +func TestHighFreqUpdates(t *testing.T) { + + store := make(Store) + readCount := 0 + + readFunc := makeReadFunc(store, &readCount) + now := uint64(1533903729) + + // write an update every second for the last 1000 seconds + var epoch lookup.Epoch + + var lastData *Data + for i := uint64(0); i <= 994; i++ { + T := uint64(now - 1000 + i) + data := Data{ + Payload: T, //our "payload" will be the timestamp itself. + Time: T, + } + epoch = update(store, epoch, T, &data) + lastData = &data + } + + value, err := lookup.Lookup(lastData.Time, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + readCountWithoutHint := readCount + // reset the read count for the next test + readCount = 0 + // Provide a hint to get a faster lookup. In particular, we give the exact location of the last update + value, err = lookup.Lookup(now, epoch, readFunc) + if err != nil { + t.Fatal(err) + } + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + if readCount > readCountWithoutHint { + t.Fatalf("Expected lookup to complete with fewer or equal reads than %d since we provided a hint. 
Did %d reads.", readCountWithoutHint, readCount) + } + + for i := uint64(0); i <= 994; i++ { + T := uint64(now - 1000 + i) // update every second for the last 1000 seconds + value, err := lookup.Lookup(T, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + data, _ := value.(*Data) + if data == nil { + t.Fatalf("Expected lookup to return %d, got nil", T) + } + if data.Payload != T { + t.Fatalf("Expected lookup to return %d, got %d", T, data.Time) + } + } +} + +func TestSparseUpdates(t *testing.T) { + + store := make(Store) + readCount := 0 + readFunc := makeReadFunc(store, &readCount) + + // write an update every 5 years 3 times starting in Jan 1st 1970 and then silence + + now := uint64(1533799046) + var epoch lookup.Epoch + + var lastData *Data + for i := uint64(0); i < 5; i++ { + T := uint64(Year * 5 * i) // write an update every 5 years 3 times starting in Jan 1st 1970 and then silence + data := Data{ + Payload: T, //our "payload" will be the timestamp itself. + Time: T, + } + epoch = update(store, epoch, T, &data) + lastData = &data + } + + // try to get the last value + + value, err := lookup.Lookup(now, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + + readCountWithoutHint := readCount + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + // reset the read count for the next test + readCount = 0 + // Provide a hint to get a faster lookup. In particular, we give the exact location of the last update + value, err = lookup.Lookup(now, epoch, readFunc) + if err != nil { + t.Fatal(err) + } + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + if readCount > readCountWithoutHint { + t.Fatalf("Expected lookup to complete with fewer reads than %d since we provided a hint. 
Did %d reads.", readCountWithoutHint, readCount) + } + +} + +// testG will hold precooked test results +// fields are abbreviated to reduce the size of the literal below +type testG struct { + e lookup.Epoch // last + n uint64 // next level + x uint8 // expected result +} + +// test cases +var testGetNextLevelCases []testG = []testG{{e: lookup.Epoch{Time: 989875233, Level: 12}, n: 989875233, x: 11}, {e: lookup.Epoch{Time: 995807650, Level: 18}, n: 995598156, x: 19}, {e: lookup.Epoch{Time: 969167082, Level: 0}, n: 968990357, x: 18}, {e: lookup.Epoch{Time: 993087628, Level: 14}, n: 992987044, x: 20}, {e: lookup.Epoch{Time: 963364631, Level: 20}, n: 963364630, x: 19}, {e: lookup.Epoch{Time: 963497510, Level: 16}, n: 963370732, x: 18}, {e: lookup.Epoch{Time: 955421349, Level: 22}, n: 955421348, x: 21}, {e: lookup.Epoch{Time: 968220379, Level: 15}, n: 968220378, x: 14}, {e: lookup.Epoch{Time: 939129014, Level: 6}, n: 939128771, x: 11}, {e: lookup.Epoch{Time: 907847903, Level: 6}, n: 907791833, x: 18}, {e: lookup.Epoch{Time: 910835564, Level: 15}, n: 910835564, x: 14}, {e: lookup.Epoch{Time: 913578333, Level: 22}, n: 881808431, x: 25}, {e: lookup.Epoch{Time: 895818460, Level: 3}, n: 895818132, x: 9}, {e: lookup.Epoch{Time: 903843025, Level: 24}, n: 895609561, x: 23}, {e: lookup.Epoch{Time: 877889433, Level: 13}, n: 877877093, x: 15}, {e: lookup.Epoch{Time: 901450396, Level: 10}, n: 901450058, x: 9}, {e: lookup.Epoch{Time: 925179910, Level: 3}, n: 925168393, x: 16}, {e: lookup.Epoch{Time: 913485477, Level: 21}, n: 913485476, x: 20}, {e: lookup.Epoch{Time: 924462991, Level: 18}, n: 924462990, x: 17}, {e: lookup.Epoch{Time: 941175128, Level: 13}, n: 941175127, x: 12}, {e: lookup.Epoch{Time: 920126583, Level: 3}, n: 920100782, x: 19}, {e: lookup.Epoch{Time: 932403200, Level: 9}, n: 932279891, x: 17}, {e: lookup.Epoch{Time: 948284931, Level: 2}, n: 948284921, x: 9}, {e: lookup.Epoch{Time: 953540997, Level: 7}, n: 950547986, x: 22}, {e: lookup.Epoch{Time: 926639837, Level: 18}, n: 918608882, x: 24}, {e: lookup.Epoch{Time: 954637598, Level: 1}, n: 954578761, x: 17}, {e: lookup.Epoch{Time: 943482981, Level: 10}, n: 942924151, x: 19}, {e: lookup.Epoch{Time: 963580771, Level: 7}, n: 963580771, x: 6}, {e: lookup.Epoch{Time: 993744930, Level: 7}, n: 993690858, x: 16}, {e: lookup.Epoch{Time: 1018890213, Level: 12}, n: 1018890212, x: 11}, {e: lookup.Epoch{Time: 1030309411, Level: 2}, n: 1030309227, x: 9}, {e: lookup.Epoch{Time: 1063204997, Level: 20}, n: 1063204996, x: 19}, {e: lookup.Epoch{Time: 1094340832, Level: 6}, n: 1094340633, x: 7}, {e: lookup.Epoch{Time: 1077880597, Level: 10}, n: 1075914292, x: 20}, {e: lookup.Epoch{Time: 1051114957, Level: 18}, n: 1051114957, x: 17}, {e: lookup.Epoch{Time: 1045649701, Level: 22}, n: 1045649700, x: 21}, {e: lookup.Epoch{Time: 1066198885, Level: 14}, n: 1066198884, x: 13}, {e: lookup.Epoch{Time: 1053231952, Level: 1}, n: 1053210845, x: 16}, {e: lookup.Epoch{Time: 1068763404, Level: 14}, n: 1068675428, x: 18}, {e: lookup.Epoch{Time: 1039042173, Level: 15}, n: 1038973110, x: 17}, {e: lookup.Epoch{Time: 1050747636, Level: 6}, n: 1050747364, x: 9}, {e: lookup.Epoch{Time: 1030034434, Level: 23}, n: 1030034433, x: 22}, {e: lookup.Epoch{Time: 1003783425, Level: 18}, n: 1003783424, x: 17}, {e: lookup.Epoch{Time: 988163976, Level: 15}, n: 988084064, x: 17}, {e: lookup.Epoch{Time: 1007222377, Level: 15}, n: 1007222377, x: 14}, {e: lookup.Epoch{Time: 1001211375, Level: 13}, n: 1001208178, x: 14}, {e: lookup.Epoch{Time: 997623199, Level: 8}, n: 997623198, x: 7}, {e: 
lookup.Epoch{Time: 1026283830, Level: 10}, n: 1006681704, x: 24}, {e: lookup.Epoch{Time: 1019421907, Level: 20}, n: 1019421906, x: 19}, {e: lookup.Epoch{Time: 1043154306, Level: 16}, n: 1043108343, x: 16}, {e: lookup.Epoch{Time: 1075643767, Level: 17}, n: 1075325898, x: 18}, {e: lookup.Epoch{Time: 1043726309, Level: 20}, n: 1043726308, x: 19}, {e: lookup.Epoch{Time: 1056415324, Level: 17}, n: 1056415324, x: 16}, {e: lookup.Epoch{Time: 1088650219, Level: 13}, n: 1088650218, x: 12}, {e: lookup.Epoch{Time: 1088551662, Level: 7}, n: 1088543355, x: 13}, {e: lookup.Epoch{Time: 1069667265, Level: 6}, n: 1069667075, x: 7}, {e: lookup.Epoch{Time: 1079145970, Level: 18}, n: 1079145969, x: 17}, {e: lookup.Epoch{Time: 1083338876, Level: 7}, n: 1083338875, x: 6}, {e: lookup.Epoch{Time: 1051581086, Level: 4}, n: 1051568869, x: 14}, {e: lookup.Epoch{Time: 1028430882, Level: 4}, n: 1028430864, x: 5}, {e: lookup.Epoch{Time: 1057356462, Level: 1}, n: 1057356417, x: 5}, {e: lookup.Epoch{Time: 1033104266, Level: 0}, n: 1033097479, x: 13}, {e: lookup.Epoch{Time: 1031391367, Level: 11}, n: 1031387304, x: 14}, {e: lookup.Epoch{Time: 1049781164, Level: 15}, n: 1049781163, x: 14}, {e: lookup.Epoch{Time: 1027271628, Level: 12}, n: 1027271627, x: 11}, {e: lookup.Epoch{Time: 1057270560, Level: 23}, n: 1057270560, x: 22}, {e: lookup.Epoch{Time: 1047501317, Level: 15}, n: 1047501317, x: 14}, {e: lookup.Epoch{Time: 1058349035, Level: 11}, n: 1045175573, x: 24}, {e: lookup.Epoch{Time: 1057396147, Level: 20}, n: 1057396147, x: 19}, {e: lookup.Epoch{Time: 1048906375, Level: 18}, n: 1039616919, x: 25}, {e: lookup.Epoch{Time: 1074294831, Level: 20}, n: 1074294831, x: 19}, {e: lookup.Epoch{Time: 1088946052, Level: 1}, n: 1088917364, x: 14}, {e: lookup.Epoch{Time: 1112337595, Level: 17}, n: 1111008110, x: 22}, {e: lookup.Epoch{Time: 1099990284, Level: 5}, n: 1099968370, x: 15}, {e: lookup.Epoch{Time: 1087036441, Level: 16}, n: 1053967855, x: 25}, {e: lookup.Epoch{Time: 1069225185, Level: 8}, n: 1069224660, x: 10}, {e: lookup.Epoch{Time: 1057505479, Level: 9}, n: 1057505170, x: 14}, {e: lookup.Epoch{Time: 1072381377, Level: 12}, n: 1065950959, x: 22}, {e: lookup.Epoch{Time: 1093887139, Level: 8}, n: 1093863305, x: 14}, {e: lookup.Epoch{Time: 1082366510, Level: 24}, n: 1082366510, x: 23}, {e: lookup.Epoch{Time: 1103231132, Level: 14}, n: 1102292201, x: 22}, {e: lookup.Epoch{Time: 1094502355, Level: 3}, n: 1094324652, x: 18}, {e: lookup.Epoch{Time: 1068488344, Level: 12}, n: 1067577330, x: 19}, {e: lookup.Epoch{Time: 1050278233, Level: 12}, n: 1050278232, x: 11}, {e: lookup.Epoch{Time: 1047660768, Level: 5}, n: 1047652137, x: 17}, {e: lookup.Epoch{Time: 1060116167, Level: 11}, n: 1060114091, x: 12}, {e: lookup.Epoch{Time: 1068149392, Level: 21}, n: 1052074801, x: 24}, {e: lookup.Epoch{Time: 1081934120, Level: 6}, n: 1081933847, x: 8}, {e: lookup.Epoch{Time: 1107943693, Level: 16}, n: 1107096139, x: 25}, {e: lookup.Epoch{Time: 1131571649, Level: 9}, n: 1131570428, x: 11}, {e: lookup.Epoch{Time: 1123139367, Level: 0}, n: 1122912198, x: 20}, {e: lookup.Epoch{Time: 1121144423, Level: 6}, n: 1120568289, x: 20}, {e: lookup.Epoch{Time: 1089932411, Level: 17}, n: 1089932410, x: 16}, {e: lookup.Epoch{Time: 1104899012, Level: 22}, n: 1098978789, x: 22}, {e: lookup.Epoch{Time: 1094588059, Level: 21}, n: 1094588059, x: 20}, {e: lookup.Epoch{Time: 1114987438, Level: 24}, n: 1114987437, x: 23}, {e: lookup.Epoch{Time: 1084186305, Level: 7}, n: 1084186241, x: 6}, {e: lookup.Epoch{Time: 1058827111, Level: 8}, n: 1058826504, x: 9}, {e: 
lookup.Epoch{Time: 1090679810, Level: 12}, n: 1090616539, x: 17}, {e: lookup.Epoch{Time: 1084299475, Level: 23}, n: 1084299475, x: 22}} + +func TestGetNextLevel(t *testing.T) { + + // First, test well-known cases + last := lookup.Epoch{ + Time: 1533799046, + Level: 5, + } + + level := lookup.GetNextLevel(last, last.Time) + expected := uint8(4) + if level != expected { + t.Fatalf("Expected GetNextLevel to return %d for same-time updates at a nonzero level, got %d", expected, level) + } + + level = lookup.GetNextLevel(last, last.Time+(1<<lookup.HighestLevel)+3000) + expected = lookup.HighestLevel + if level != expected { + t.Fatalf("Expected GetNextLevel to return %d for updates set 2^lookup.HighestLevel seconds away, got %d", expected, level) + } + + level = lookup.GetNextLevel(last, last.Time+(1<<last.Level)) + expected = last.Level + if level != expected { + t.Fatalf("Expected GetNextLevel to return %d for updates set 2^last.Level seconds away, got %d", expected, level) + } + + last.Level = 0 + level = lookup.GetNextLevel(last, last.Time) + expected = 0 + if level != expected { + t.Fatalf("Expected GetNextLevel to return %d for same-time updates at a zero level, got %d", expected, level) + } + + // run a batch of 100 cooked tests + for _, s := range testGetNextLevelCases { + level := lookup.GetNextLevel(s.e, s.n) + if level != s.x { + t.Fatalf("Expected GetNextLevel to return %d for last=%s when now=%d, got %d", s.x, s.e.String(), s.n, level) + } + } + +} + +// cookGetNextLevelTests is used to generate a deterministic +// set of cases for TestGetNextLevel and thus "freeze" its current behavior +func CookGetNextLevelTests(t *testing.T) { + st := "" + var last lookup.Epoch + last.Time = 1000000000 + var now uint64 + var expected uint8 + for i := 0; i < 100; i++ { + last.Time += uint64(rand.Intn(1<<26)) - (1 << 25) + last.Level = uint8(rand.Intn(25)) + v := last.Level + uint8(rand.Intn(lookup.HighestLevel)) + if v > lookup.HighestLevel { + v = 0 + } + now = last.Time + uint64(rand.Intn(1<<v+1)) - (1 << v) + expected = lookup.GetNextLevel(last, now) + st = fmt.Sprintf("%s,testG{e:lookup.Epoch{Time:%d, Level:%d}, n:%d, x:%d}", st, last.Time, last.Level, now, expected) + } + fmt.Println(st) +} diff --git a/swarm/storage/mru/lookup_test.go b/swarm/storage/mru/lookup_test.go deleted file mode 100644 index b66b200a3..000000000 --- a/swarm/storage/mru/lookup_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package mru - -import ( - "bytes" - "testing" - - "github.com/ethereum/go-ethereum/common/hexutil" -) - -func getTestUpdateLookup() *UpdateLookup { - metadata := *getTestMetadata() - rootAddr, _, _, _ := metadata.serializeAndHash() - return &UpdateLookup{ - period: 79, - version: 2010, - rootAddr: rootAddr, - } -} - -func compareUpdateLookup(a, b *UpdateLookup) bool { - return a.version == b.version && - a.period == b.period && - bytes.Equal(a.rootAddr, b.rootAddr) -} - -func TestUpdateLookupUpdateAddr(t *testing.T) { - ul := getTestUpdateLookup() - updateAddr := ul.UpdateAddr() - compareByteSliceToExpectedHex(t, "updateAddr", updateAddr, "0x8fbc8d4777ef6da790257eda80ab4321fabd08cbdbe67e4e3da6caca386d64e0") -} - -func TestUpdateLookupSerializer(t *testing.T) { - serializedUpdateLookup := make([]byte, updateLookupLength) - ul := getTestUpdateLookup() - if err := ul.binaryPut(serializedUpdateLookup); err != nil { - t.Fatal(err) - } - compareByteSliceToExpectedHex(t, "serializedUpdateLookup", serializedUpdateLookup, "0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb") - - // 
set receiving slice to the wrong size - serializedUpdateLookup = make([]byte, updateLookupLength+7) - if err := ul.binaryPut(serializedUpdateLookup); err == nil { - t.Fatalf("Expected UpdateLookup.binaryPut to fail when receiving slice has a length != %d", updateLookupLength) - } - - // set rootAddr to an invalid length - ul.rootAddr = []byte{1, 2, 3, 4} - serializedUpdateLookup = make([]byte, updateLookupLength) - if err := ul.binaryPut(serializedUpdateLookup); err == nil { - t.Fatal("Expected UpdateLookup.binaryPut to fail when rootAddr is not of the correct size") - } -} - -func TestUpdateLookupDeserializer(t *testing.T) { - serializedUpdateLookup, _ := hexutil.Decode("0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb") - var recoveredUpdateLookup UpdateLookup - if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err != nil { - t.Fatal(err) - } - originalUpdateLookup := *getTestUpdateLookup() - if !compareUpdateLookup(&originalUpdateLookup, &recoveredUpdateLookup) { - t.Fatalf("Expected recovered UpdateLookup to match") - } - - // set source slice to the wrong size - serializedUpdateLookup = make([]byte, updateLookupLength+4) - if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err == nil { - t.Fatalf("Expected UpdateLookup.binaryGet to fail when source slice has a length != %d", updateLookupLength) - } -} - -func TestUpdateLookupSerializeDeserialize(t *testing.T) { - serializedUpdateLookup := make([]byte, updateLookupLength) - originalUpdateLookup := getTestUpdateLookup() - if err := originalUpdateLookup.binaryPut(serializedUpdateLookup); err != nil { - t.Fatal(err) - } - var recoveredUpdateLookup UpdateLookup - if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err != nil { - t.Fatal(err) - } - if !compareUpdateLookup(originalUpdateLookup, &recoveredUpdateLookup) { - t.Fatalf("Expected recovered UpdateLookup to match") - } -} diff --git a/swarm/storage/mru/metadata.go b/swarm/storage/mru/metadata.go deleted file mode 100644 index 509114895..000000000 --- a/swarm/storage/mru/metadata.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
- -package mru - -import ( - "encoding/binary" - "hash" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/swarm/storage" -) - -// ResourceMetadata encapsulates the immutable information about a mutable resource :) -// once serialized into a chunk, the resource can be retrieved by knowing its content-addressed rootAddr -type ResourceMetadata struct { - StartTime Timestamp // time at which the resource starts to be valid - Frequency uint64 // expected update frequency for the resource - Name string // name of the resource, for the reference of the user or to disambiguate resources with same starttime, frequency, owneraddr - Owner common.Address // public address of the resource owner -} - -const frequencyLength = 8 // sizeof(uint64) -const nameLengthLength = 1 - -// Resource metadata chunk layout: -// 4 prefix bytes (chunkPrefixLength). The first two set to zero. The second two indicate the length -// Timestamp: timestampLength bytes -// frequency: frequencyLength bytes -// name length: nameLengthLength bytes -// name (variable length, can be empty, up to 255 bytes) -// ownerAddr: common.AddressLength -const minimumMetadataLength = chunkPrefixLength + timestampLength + frequencyLength + nameLengthLength + common.AddressLength - -// binaryGet populates the resource metadata from a byte array -func (r *ResourceMetadata) binaryGet(serializedData []byte) error { - if len(serializedData) < minimumMetadataLength { - return NewErrorf(ErrInvalidValue, "Metadata chunk to deserialize is too short. Expected at least %d. Got %d.", minimumMetadataLength, len(serializedData)) - } - - // first two bytes must be set to zero to indicate metadata chunks, so enforce this. - if serializedData[0] != 0 || serializedData[1] != 0 { - return NewError(ErrCorruptData, "Invalid metadata chunk") - } - - cursor := 2 - metadataLength := int(binary.LittleEndian.Uint16(serializedData[cursor : cursor+2])) // metadataLength does not include the 4 prefix bytes - if metadataLength+chunkPrefixLength != len(serializedData) { - return NewErrorf(ErrCorruptData, "Incorrect declared metadata length. Expected %d, got %d.", metadataLength+chunkPrefixLength, len(serializedData)) - } - - cursor += 2 - - if err := r.StartTime.binaryGet(serializedData[cursor : cursor+timestampLength]); err != nil { - return err - } - cursor += timestampLength - - r.Frequency = binary.LittleEndian.Uint64(serializedData[cursor : cursor+frequencyLength]) - cursor += frequencyLength - - nameLength := int(serializedData[cursor]) - if nameLength+minimumMetadataLength > len(serializedData) { - return NewErrorf(ErrInvalidValue, "Metadata chunk to deserialize is too short when decoding resource name. Expected at least %d. Got %d.", nameLength+minimumMetadataLength, len(serializedData)) - } - cursor++ - r.Name = string(serializedData[cursor : cursor+nameLength]) - cursor += nameLength - - copy(r.Owner[:], serializedData[cursor:]) - cursor += common.AddressLength - if cursor != len(serializedData) { - return NewErrorf(ErrInvalidValue, "Metadata chunk has leftover data after deserialization. 
%d left to read", len(serializedData)-cursor) - } - return nil -} - -// binaryPut encodes the metadata into a byte array -func (r *ResourceMetadata) binaryPut(serializedData []byte) error { - metadataChunkLength := r.binaryLength() - if len(serializedData) != metadataChunkLength { - return NewErrorf(ErrInvalidValue, "Need a slice of exactly %d bytes to serialize this metadata, but got a slice of size %d.", metadataChunkLength, len(serializedData)) - } - - // root chunk has first two bytes both set to 0, which distinguishes from update bytes - // therefore, skip the first two bytes of a zero-initialized array. - cursor := 2 - binary.LittleEndian.PutUint16(serializedData[cursor:cursor+2], uint16(metadataChunkLength-chunkPrefixLength)) // metadataLength does not include the 4 prefix bytes - cursor += 2 - - r.StartTime.binaryPut(serializedData[cursor : cursor+timestampLength]) - cursor += timestampLength - - binary.LittleEndian.PutUint64(serializedData[cursor:cursor+frequencyLength], r.Frequency) - cursor += frequencyLength - - // Encode the name string as a 1 byte length followed by the encoded string. - // Longer strings will be truncated. - nameLength := len(r.Name) - if nameLength > 255 { - nameLength = 255 - } - serializedData[cursor] = uint8(nameLength) - cursor++ - copy(serializedData[cursor:cursor+nameLength], []byte(r.Name[:nameLength])) - cursor += nameLength - - copy(serializedData[cursor:cursor+common.AddressLength], r.Owner[:]) - cursor += common.AddressLength - - return nil -} - -func (r *ResourceMetadata) binaryLength() int { - return minimumMetadataLength + len(r.Name) -} - -// serializeAndHash returns the root chunk addr and metadata hash that help identify and ascertain ownership of this resource -// returns the serialized metadata as a byproduct of having to hash it. 
-func (r *ResourceMetadata) serializeAndHash() (rootAddr, metaHash []byte, chunkData []byte, err error) { - - chunkData = make([]byte, r.binaryLength()) - if err := r.binaryPut(chunkData); err != nil { - return nil, nil, nil, err - } - rootAddr, metaHash = metadataHash(chunkData) - return rootAddr, metaHash, chunkData, nil - -} - -// creates a metadata chunk out of a resourceMetadata structure -func (metadata *ResourceMetadata) newChunk() (chunk storage.Chunk, metaHash []byte, err error) { - // the metadata chunk contains a timestamp of when the resource starts to be valid - // and also how frequently it is expected to be updated - // from this we know at what time we should look for updates, and how often - // it also contains the name of the resource, so we know what resource we are working with - - // the key (rootAddr) of the metadata chunk is content-addressed - // if it wasn't we couldn't replace it later - // resolving this relationship is left up to external agents (for example ENS) - rootAddr, metaHash, chunkData, err := metadata.serializeAndHash() - if err != nil { - return nil, nil, err - } - - // make the chunk and send it to swarm - chunk = storage.NewChunk(rootAddr, chunkData) - - return chunk, metaHash, nil -} - -// metadataHash returns the metadata chunk root address and metadata hash -// that help identify and ascertain ownership of this resource -// We compute it as rootAddr = H(ownerAddr, H(metadata)) -// Where H() is SHA3 -// metadata are all the metadata fields, except ownerAddr -// ownerAddr is the public address of the resource owner -// Update chunks must carry a rootAddr reference and metaHash in order to be verified -// This way, a node that receives an update can check the signature, recover the public address -// and check the ownership by computing H(ownerAddr, metaHash) and comparing it to the rootAddr -// the resource is claiming to update without having to lookup the metadata chunk. -// see verifyResourceOwnerhsip in signedupdate.go -func metadataHash(chunkData []byte) (rootAddr, metaHash []byte) { - hasher := hashPool.Get().(hash.Hash) - defer hashPool.Put(hasher) - hasher.Reset() - hasher.Write(chunkData[:len(chunkData)-common.AddressLength]) - metaHash = hasher.Sum(nil) - hasher.Reset() - hasher.Write(metaHash) - hasher.Write(chunkData[len(chunkData)-common.AddressLength:]) - rootAddr = hasher.Sum(nil) - return -} diff --git a/swarm/storage/mru/metadata_test.go b/swarm/storage/mru/metadata_test.go deleted file mode 100644 index abbac6e3e..000000000 --- a/swarm/storage/mru/metadata_test.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
-package mru - -import ( - "testing" - - "github.com/ethereum/go-ethereum/common/hexutil" -) - -func compareByteSliceToExpectedHex(t *testing.T, variableName string, actualValue []byte, expectedHex string) { - if hexutil.Encode(actualValue) != expectedHex { - t.Fatalf("%s: Expected %s to be %s, got %s", t.Name(), variableName, expectedHex, hexutil.Encode(actualValue)) - } -} - -func getTestMetadata() *ResourceMetadata { - return &ResourceMetadata{ - Name: "world news report, every hour, on the hour", - StartTime: Timestamp{ - Time: 1528880400, - }, - Frequency: 3600, - Owner: newCharlieSigner().Address(), - } -} - -func TestMetadataSerializerDeserializer(t *testing.T) { - metadata := *getTestMetadata() - - rootAddr, metaHash, chunkData, err := metadata.serializeAndHash() // creates hashes and marshals, in one go - if err != nil { - t.Fatal(err) - } - const expectedRootAddr = "0xfb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb" - const expectedMetaHash = "0xf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf0" - const expectedChunkData = "0x00004f0010dd205b00000000100e0000000000002a776f726c64206e657773207265706f72742c20657665727920686f75722c206f6e2074686520686f7572876a8936a7cd0b79ef0735ad0896c1afe278781c" - - compareByteSliceToExpectedHex(t, "rootAddr", rootAddr, expectedRootAddr) - compareByteSliceToExpectedHex(t, "metaHash", metaHash, expectedMetaHash) - compareByteSliceToExpectedHex(t, "chunkData", chunkData, expectedChunkData) - - recoveredMetadata := ResourceMetadata{} - recoveredMetadata.binaryGet(chunkData) - - if recoveredMetadata != metadata { - t.Fatalf("Expected that the recovered metadata equals the marshalled metadata") - } - - // we are going to mess with the data, so create a backup to go back to it for the next test - backup := make([]byte, len(chunkData)) - copy(backup, chunkData) - - chunkData = []byte{1, 2, 3} - if err := recoveredMetadata.binaryGet(chunkData); err == nil { - t.Fatal("Expected binaryGet to fail since chunk is too small") - } - - // restore backup - chunkData = make([]byte, len(backup)) - copy(chunkData, backup) - - // mess with the prefix so it is not zero - chunkData[0] = 7 - chunkData[1] = 9 - - if err := recoveredMetadata.binaryGet(chunkData); err == nil { - t.Fatal("Expected binaryGet to fail since prefix bytes are not zero") - } - - // restore backup - chunkData = make([]byte, len(backup)) - copy(chunkData, backup) - - // mess with the length header to trigger an error - chunkData[2] = 255 - chunkData[3] = 44 - if err := recoveredMetadata.binaryGet(chunkData); err == nil { - t.Fatal("Expected binaryGet to fail since header length does not match") - } - - // restore backup - chunkData = make([]byte, len(backup)) - copy(chunkData, backup) - - // mess with name length header to trigger a chunk too short error - chunkData[20] = 255 - if err := recoveredMetadata.binaryGet(chunkData); err == nil { - t.Fatal("Expected binaryGet to fail since name length is incorrect") - } - - // restore backup - chunkData = make([]byte, len(backup)) - copy(chunkData, backup) - - // mess with name length header to trigger an leftover bytes to read error - chunkData[20] = 3 - if err := recoveredMetadata.binaryGet(chunkData); err == nil { - t.Fatal("Expected binaryGet to fail since name length is too small") - } -} - -func TestMetadataSerializerLengthCheck(t *testing.T) { - metadata := *getTestMetadata() - - // make a slice that is too small to contain the metadata - serializedMetadata := make([]byte, 4) - - if err := 
metadata.binaryPut(serializedMetadata); err == nil { - t.Fatal("Expected metadata.binaryPut to fail, since target slice is too small") - } - -} diff --git a/swarm/storage/mru/query.go b/swarm/storage/mru/query.go new file mode 100644 index 000000000..13a28eaab --- /dev/null +++ b/swarm/storage/mru/query.go @@ -0,0 +1,78 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package mru + +import ( + "fmt" + "strconv" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" +) + +// Query is used to specify constraints when performing an update lookup +// TimeLimit indicates an upper bound for the search. Set to 0 for "now" +type Query struct { + View + Hint lookup.Epoch + TimeLimit uint64 +} + +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (q *Query) FromValues(values Values) error { + time, _ := strconv.ParseUint(values.Get("time"), 10, 64) + q.TimeLimit = time + + level, _ := strconv.ParseUint(values.Get("hint.level"), 10, 32) + q.Hint.Level = uint8(level) + q.Hint.Time, _ = strconv.ParseUint(values.Get("hint.time"), 10, 64) + if q.View.User == (common.Address{}) { + return q.View.FromValues(values) + } + return nil +} + +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (q *Query) AppendValues(values Values) { + if q.TimeLimit != 0 { + values.Set("time", fmt.Sprintf("%d", q.TimeLimit)) + } + if q.Hint.Level != 0 { + values.Set("hint.level", fmt.Sprintf("%d", q.Hint.Level)) + } + if q.Hint.Time != 0 { + values.Set("hint.time", fmt.Sprintf("%d", q.Hint.Time)) + } + q.View.AppendValues(values) +} + +// NewQuery constructs an Query structure to find updates on or before `time` +// if time == 0, the latest update will be looked up +func NewQuery(view *View, time uint64, hint lookup.Epoch) *Query { + return &Query{ + TimeLimit: time, + View: *view, + Hint: hint, + } +} + +// NewQueryLatest generates lookup parameters that look for the latest version of a resource +func NewQueryLatest(view *View, hint lookup.Epoch) *Query { + return NewQuery(view, 0, hint) +} diff --git a/swarm/storage/mru/query_test.go b/swarm/storage/mru/query_test.go new file mode 100644 index 000000000..189a465d6 --- /dev/null +++ b/swarm/storage/mru/query_test.go @@ -0,0 +1,38 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package mru + +import ( + "testing" +) + +func getTestQuery() *Query { + ul := getTestID() + return &Query{ + TimeLimit: 5000, + View: ul.View, + Hint: ul.Epoch, + } +} + +func TestQueryValues(t *testing.T) { + var expected = KV{"hint.level": "25", "hint.time": "1000", "time": "5000", "topic": "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000", "user": "0x876A8936A7Cd0b79Ef0735AD0896c1AFe278781c"} + + query := getTestQuery() + testValueSerializer(t, query, expected) + +} diff --git a/swarm/storage/mru/request.go b/swarm/storage/mru/request.go index af2ccf5c7..f6d0f38ff 100644 --- a/swarm/storage/mru/request.go +++ b/swarm/storage/mru/request.go @@ -19,157 +19,218 @@ package mru import ( "bytes" "encoding/json" + "hash" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/swarm/storage" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" ) +// Request represents an update and/or resource create message +type Request struct { + ResourceUpdate // actual content that will be put on the chunk, less signature + Signature *Signature + idAddr storage.Address // cached chunk address for the update (not serialized, for internal use) + binaryData []byte // cached serialized data (does not get serialized again!, for efficiency/internal use) +} + // updateRequestJSON represents a JSON-serialized UpdateRequest type updateRequestJSON struct { - Name string `json:"name,omitempty"` - Frequency uint64 `json:"frequency,omitempty"` - StartTime uint64 `json:"startTime,omitempty"` - Owner string `json:"ownerAddr,omitempty"` - RootAddr string `json:"rootAddr,omitempty"` - MetaHash string `json:"metaHash,omitempty"` - Version uint32 `json:"version,omitempty"` - Period uint32 `json:"period,omitempty"` - Data string `json:"data,omitempty"` - Multihash bool `json:"multiHash"` - Signature string `json:"signature,omitempty"` + ID + ProtocolVersion uint8 `json:"protocolVersion"` + Data string `json:"data,omitempty"` + Signature string `json:"signature,omitempty"` } -// Request represents an update and/or resource create message -type Request struct { - SignedResourceUpdate - metadata ResourceMetadata - isNew bool +// Request layout +// resourceUpdate bytes +// SignatureLength bytes +const minimumSignedUpdateLength = minimumUpdateDataLength + signatureLength + +// NewFirstRequest returns a ready to sign request to publish a first update +func NewFirstRequest(topic Topic) *Request { + + request := new(Request) + + // get the current time + now := TimestampProvider.Now().Time + request.Epoch = lookup.GetFirstEpoch(now) + request.View.Topic = topic + request.Header.Version = ProtocolVersion + + return request +} + +// SetData stores the payload data the resource will be updated with +func (r *Request) SetData(data []byte) { + r.data = data + r.Signature = nil } -var zeroAddr = common.Address{} +// IsUpdate returns true if this request models a signed update or otherwise it is a signature request +func (r *Request) IsUpdate() bool { + return r.Signature != nil +} -// NewCreateUpdateRequest returns 
a ready to sign request to create and initialize a resource with data -func NewCreateUpdateRequest(metadata *ResourceMetadata) (*Request, error) { +// Verify checks that signatures are valid and that the signer owns the resource to be updated +func (r *Request) Verify() (err error) { + if len(r.data) == 0 { + return NewError(ErrInvalidValue, "Update does not contain data") + } + if r.Signature == nil { + return NewError(ErrInvalidSignature, "Missing signature field") + } - request, err := NewCreateRequest(metadata) + digest, err := r.GetDigest() if err != nil { - return nil, err + return err } - // get the current time - now := TimestampProvider.Now().Time - - request.version = 1 - request.period, err = getNextPeriod(metadata.StartTime.Time, now, metadata.Frequency) + // get the address of the signer (which also checks that it's a valid signature) + r.View.User, err = getUserAddr(digest, *r.Signature) if err != nil { - return nil, err + return err } - return request, nil + + // check that the lookup information contained in the chunk matches the updateAddr (chunk search key) + // that was used to retrieve this chunk + // if this validation fails, someone forged a chunk. + if !bytes.Equal(r.idAddr, r.Addr()) { + return NewError(ErrInvalidSignature, "Signature address does not match with update user address") + } + + return nil } -// NewCreateRequest returns a request to create a new resource -func NewCreateRequest(metadata *ResourceMetadata) (request *Request, err error) { - if metadata.StartTime.Time == 0 { // get the current time - metadata.StartTime = TimestampProvider.Now() +// Sign executes the signature to validate the resource +func (r *Request) Sign(signer Signer) error { + r.View.User = signer.Address() + r.binaryData = nil //invalidate serialized data + digest, err := r.GetDigest() // computes digest and serializes into .binaryData + if err != nil { + return err } - if metadata.Owner == zeroAddr { - return nil, NewError(ErrInvalidValue, "OwnerAddr is not set") + signature, err := signer.Sign(digest) + if err != nil { + return err } - request = &Request{ - metadata: *metadata, + // Although the Signer interface returns the public address of the signer, + // recover it from the signature to see if they match + userAddr, err := getUserAddr(digest, signature) + if err != nil { + return NewError(ErrInvalidSignature, "Error verifying signature") } - request.rootAddr, request.metaHash, _, err = request.metadata.serializeAndHash() - request.isNew = true - return request, nil -} -// Frequency returns the resource's expected update frequency -func (r *Request) Frequency() uint64 { - return r.metadata.Frequency -} + if userAddr != signer.Address() { // sanity check to make sure the Signer is declaring the same address used to sign! 
+ return NewError(ErrInvalidSignature, "Signer address does not match update user address") + } -// Name returns the resource human-readable name -func (r *Request) Name() string { - return r.metadata.Name + r.Signature = &signature + r.idAddr = r.Addr() + return nil } -// Multihash returns true if the resource data should be interpreted as a multihash -func (r *Request) Multihash() bool { - return r.multihash -} +// GetDigest creates the resource update digest used in signatures +// the serialized payload is cached in .binaryData +func (r *Request) GetDigest() (result common.Hash, err error) { + hasher := hashPool.Get().(hash.Hash) + defer hashPool.Put(hasher) + hasher.Reset() + dataLength := r.ResourceUpdate.binaryLength() + if r.binaryData == nil { + r.binaryData = make([]byte, dataLength+signatureLength) + if err := r.ResourceUpdate.binaryPut(r.binaryData[:dataLength]); err != nil { + return result, err + } + } + hasher.Write(r.binaryData[:dataLength]) //everything except the signature. -// Period returns in which period the resource will be published -func (r *Request) Period() uint32 { - return r.period + return common.BytesToHash(hasher.Sum(nil)), nil } -// Version returns the resource version to publish -func (r *Request) Version() uint32 { - return r.version -} +// create an update chunk. +func (r *Request) toChunk() (storage.Chunk, error) { -// RootAddr returns the metadata chunk address -func (r *Request) RootAddr() storage.Address { - return r.rootAddr -} + // Check that the update is signed and serialized + // For efficiency, data is serialized during signature and cached in + // the binaryData field when computing the signature digest in .getDigest() + if r.Signature == nil || r.binaryData == nil { + return nil, NewError(ErrInvalidSignature, "toChunk called without a valid signature or payload data. Call .Sign() first.") + } -// StartTime returns the time that the resource was/will be created at -func (r *Request) StartTime() Timestamp { - return r.metadata.StartTime -} + resourceUpdateLength := r.ResourceUpdate.binaryLength() + + // signature is the last item in the chunk data + copy(r.binaryData[resourceUpdateLength:], r.Signature[:]) -// Owner returns the resource owner's address -func (r *Request) Owner() common.Address { - return r.metadata.Owner + chunk := storage.NewChunk(r.idAddr, r.binaryData) + return chunk, nil } -// Sign executes the signature to validate the resource and sets the owner address field -func (r *Request) Sign(signer Signer) error { - if r.metadata.Owner != zeroAddr && r.metadata.Owner != signer.Address() { - return NewError(ErrInvalidSignature, "Signer does not match current owner of the resource") - } +// fromChunk populates this structure from chunk data. It does not verify the signature is valid. 
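Taken together, the new Sign, Verify, GetDigest and toChunk methods above replace the old create/update split with a single publish path. A standalone sketch of that path (not part of the diff; it assumes mru.NewGenericSigner is exported as the deleted test helpers suggest, and that the package ships a working default TimestampProvider):

```go
// Hypothetical publish flow for a first update: build, set data, sign, verify.
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

func main() {
	key, err := crypto.GenerateKey() // throwaway owner key for the sketch
	if err != nil {
		log.Fatal(err)
	}
	signer := mru.NewGenericSigner(key)

	topic, _ := mru.NewTopic("world news report, every hour", nil)
	request := mru.NewFirstRequest(topic) // picks the first epoch for "now"

	request.SetData([]byte("This hour's update: Swarm 99.0 has been released!"))
	if err := request.Sign(signer); err != nil { // fills in View.User, Signature and the cached update address
		log.Fatal(err)
	}
	if err := request.Verify(); err != nil { // recovers the signer and re-checks the update address
		log.Fatal(err)
	}
	// request is now ready to be serialized to a chunk or to JSON and handed to a node
}
```

Note that Sign recovers the address back from the fresh signature and compares it with signer.Address(), so a misbehaving Signer implementation is caught immediately.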
+func (r *Request) fromChunk(updateAddr storage.Address, chunkdata []byte) error { + // for update chunk layout see Request definition - if err := r.SignedResourceUpdate.Sign(signer); err != nil { + //deserialize the resource update portion + if err := r.ResourceUpdate.binaryGet(chunkdata[:len(chunkdata)-signatureLength]); err != nil { return err } - r.metadata.Owner = signer.Address() - return nil -} -// SetData stores the payload data the resource will be updated with -func (r *Request) SetData(data []byte, multihash bool) { - r.data = data - r.multihash = multihash - r.signature = nil - if !r.isNew { - r.metadata.Frequency = 0 // mark as update + // Extract the signature + var signature *Signature + cursor := r.ResourceUpdate.binaryLength() + sigdata := chunkdata[cursor : cursor+signatureLength] + if len(sigdata) > 0 { + signature = &Signature{} + copy(signature[:], sigdata) } + + r.Signature = signature + r.idAddr = updateAddr + r.binaryData = chunkdata + + return nil + } -func (r *Request) IsNew() bool { - return r.metadata.Frequency > 0 && (r.period <= 1 || r.version <= 1) +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (r *Request) FromValues(values Values, data []byte) error { + signatureBytes, err := hexutil.Decode(values.Get("signature")) + if err != nil { + r.Signature = nil + } else { + if len(signatureBytes) != signatureLength { + return NewError(ErrInvalidSignature, "Incorrect signature length") + } + r.Signature = new(Signature) + copy(r.Signature[:], signatureBytes) + } + err = r.ResourceUpdate.FromValues(values, data) + if err != nil { + return err + } + r.idAddr = r.Addr() + return err } -func (r *Request) IsUpdate() bool { - return r.signature != nil +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (r *Request) AppendValues(values Values) []byte { + if r.Signature != nil { + values.Set("signature", hexutil.Encode(r.Signature[:])) + } + return r.ResourceUpdate.AppendValues(values) } // fromJSON takes an update request JSON and populates an UpdateRequest func (r *Request) fromJSON(j *updateRequestJSON) error { - r.version = j.Version - r.period = j.Period - r.multihash = j.Multihash - r.metadata.Name = j.Name - r.metadata.Frequency = j.Frequency - r.metadata.StartTime.Time = j.StartTime - - if err := decodeHexArray(r.metadata.Owner[:], j.Owner, "ownerAddr"); err != nil { - return err - } + r.ID = j.ID + r.Header.Version = j.ProtocolVersion var err error if j.Data != "" { @@ -179,73 +240,18 @@ func (r *Request) fromJSON(j *updateRequestJSON) error { } } - var declaredRootAddr storage.Address - var declaredMetaHash []byte - - declaredRootAddr, err = decodeHexSlice(j.RootAddr, storage.AddressLength, "rootAddr") - if err != nil { - return err - } - declaredMetaHash, err = decodeHexSlice(j.MetaHash, 32, "metaHash") - if err != nil { - return err - } - - if r.IsNew() { - // for new resource creation, rootAddr and metaHash are optional because - // we can derive them from the content itself. - // however, if the user sent them, we check them for consistency. 
- - r.rootAddr, r.metaHash, _, err = r.metadata.serializeAndHash() - if err != nil { - return err - } - if j.RootAddr != "" && !bytes.Equal(declaredRootAddr, r.rootAddr) { - return NewError(ErrInvalidValue, "rootAddr does not match resource metadata") - } - if j.MetaHash != "" && !bytes.Equal(declaredMetaHash, r.metaHash) { - return NewError(ErrInvalidValue, "metaHash does not match resource metadata") - } - - } else { - //Update message - r.rootAddr = declaredRootAddr - r.metaHash = declaredMetaHash - } - if j.Signature != "" { sigBytes, err := hexutil.Decode(j.Signature) if err != nil || len(sigBytes) != signatureLength { return NewError(ErrInvalidSignature, "Cannot decode signature") } - r.signature = new(Signature) - r.updateAddr = r.UpdateAddr() - copy(r.signature[:], sigBytes) + r.Signature = new(Signature) + r.idAddr = r.Addr() + copy(r.Signature[:], sigBytes) } return nil } -func decodeHexArray(dst []byte, src, name string) error { - bytes, err := decodeHexSlice(src, len(dst), name) - if err != nil { - return err - } - if bytes != nil { - copy(dst, bytes) - } - return nil -} - -func decodeHexSlice(src string, expectedLength int, name string) (bytes []byte, err error) { - if src != "" { - bytes, err = hexutil.Decode(src) - if err != nil || len(bytes) != expectedLength { - return nil, NewErrorf(ErrInvalidValue, "Cannot decode %s", name) - } - } - return bytes, nil -} - // UnmarshalJSON takes a JSON structure stored in a byte array and populates the Request object // Implements json.Unmarshaler interface func (r *Request) UnmarshalJSON(rawData []byte) error { @@ -259,38 +265,19 @@ func (r *Request) UnmarshalJSON(rawData []byte) error { // MarshalJSON takes an update request and encodes it as a JSON structure into a byte array // Implements json.Marshaler interface func (r *Request) MarshalJSON() (rawData []byte, err error) { - var signatureString, dataHashString, rootAddrString, metaHashString string - if r.signature != nil { - signatureString = hexutil.Encode(r.signature[:]) + var signatureString, dataString string + if r.Signature != nil { + signatureString = hexutil.Encode(r.Signature[:]) } if r.data != nil { - dataHashString = hexutil.Encode(r.data) - } - if r.rootAddr != nil { - rootAddrString = hexutil.Encode(r.rootAddr) - } - if r.metaHash != nil { - metaHashString = hexutil.Encode(r.metaHash) - } - var ownerAddrString string - if r.metadata.Frequency == 0 { - ownerAddrString = "" - } else { - ownerAddrString = hexutil.Encode(r.metadata.Owner[:]) + dataString = hexutil.Encode(r.data) } requestJSON := &updateRequestJSON{ - Name: r.metadata.Name, - Frequency: r.metadata.Frequency, - StartTime: r.metadata.StartTime.Time, - Version: r.version, - Period: r.period, - Owner: ownerAddrString, - Data: dataHashString, - Multihash: r.multihash, - Signature: signatureString, - RootAddr: rootAddrString, - MetaHash: metaHashString, + ID: r.ID, + ProtocolVersion: r.Header.Version, + Data: dataString, + Signature: signatureString, } return json.Marshal(requestJSON) diff --git a/swarm/storage/mru/request_test.go b/swarm/storage/mru/request_test.go index dba55b27e..c32d5ec13 100644 --- a/swarm/storage/mru/request_test.go +++ b/swarm/storage/mru/request_test.go @@ -1,11 +1,32 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + package mru import ( + "bytes" "encoding/binary" "encoding/json" "fmt" "reflect" "testing" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/swarm/storage" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" ) func areEqualJSON(s1, s2 string) (bool, error) { @@ -29,19 +50,13 @@ func areEqualJSON(s1, s2 string) (bool, error) { // while also checking cryptographically that only the owner of a resource can update it. func TestEncodingDecodingUpdateRequests(t *testing.T) { - signer := newCharlieSigner() //Charlie, our good guy - falseSigner := newBobSigner() //Bob will play the bad guy again + charlie := newCharlieSigner() //Charlie + bob := newBobSigner() //Bob // Create a resource to our good guy Charlie's name - createRequest, err := NewCreateRequest(&ResourceMetadata{ - Name: "a good resource name", - Frequency: 300, - StartTime: Timestamp{Time: 1528900000}, - Owner: signer.Address()}) - - if err != nil { - t.Fatalf("Error creating resource name: %s", err) - } + topic, _ := NewTopic("a good resource name", nil) + createRequest := NewFirstRequest(topic) + createRequest.User = charlie.Address() // We now encode the create message to simulate we send it over the wire messageRawData, err := createRequest.MarshalJSON() @@ -64,27 +79,21 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) { // and recover the information above. To sign an update, we need the rootAddr and the metaHash to construct // proof of ownership - metaHash := createRequest.metaHash - rootAddr := createRequest.rootAddr - const expectedSignature = "0x1c2bab66dc4ed63783d62934e3a628e517888d6949aef0349f3bd677121db9aa09bbfb865904e6c50360e209e0fe6fe757f8a2474cf1b34169c99b95e3fd5a5101" - const expectedJSON = `{"rootAddr":"0x6e744a730f7ea0881528576f0354b6268b98e35a6981ef703153ff1b8d32bbef","metaHash":"0x0c0d5c18b89da503af92302a1a64fab6acb60f78e288eb9c3d541655cd359b60","version":1,"period":7,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421","multiHash":false}` + const expectedSignature = "0x32c2d2c7224e24e4d3ae6a10595fc6e945f1b3ecdf548a04d8247c240a50c9240076aa7730abad6c8a46dfea00cfb8f43b6211f02db5c4cc5ed8584cb0212a4d00" + const expectedJSON = `{"view":{"topic":"0x6120676f6f64207265736f75726365206e616d65000000000000000000000000","user":"0x876a8936a7cd0b79ef0735ad0896c1afe278781c"},"epoch":{"time":1000,"level":1},"protocolVersion":0,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421"}` //Put together an unsigned update request that we will serialize to send it to the signer. 
data := []byte("This hour's update: Swarm 99.0 has been released!") request := &Request{ - SignedResourceUpdate: SignedResourceUpdate{ - resourceUpdate: resourceUpdate{ - updateHeader: updateHeader{ - UpdateLookup: UpdateLookup{ - period: 7, - version: 1, - rootAddr: rootAddr, - }, - multihash: false, - metaHash: metaHash, + ResourceUpdate: ResourceUpdate{ + ID: ID{ + Epoch: lookup.Epoch{ + Time: 1000, + Level: 1, }, - data: data, + View: createRequest.ResourceUpdate.View, }, + data: data, }, } @@ -110,11 +119,11 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) { } //sign the request and see if it matches our predefined signature above. - if err := recoveredRequest.Sign(signer); err != nil { + if err := recoveredRequest.Sign(charlie); err != nil { t.Fatalf("Error signing request: %s", err) } - compareByteSliceToExpectedHex(t, "signature", recoveredRequest.signature[:], expectedSignature) + compareByteSliceToExpectedHex(t, "signature", recoveredRequest.Signature[:], expectedSignature) // mess with the signature and see what happens. To alter the signature, we briefly decode it as JSON // to alter the signature field. @@ -129,9 +138,9 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) { t.Fatal("Expected DecodeUpdateRequest to fail when trying to interpret a corrupt message with an invalid signature") } - // Now imagine Evil Bob (why always Bob, poor Bob) attempts to update Charlie's resource, + // Now imagine Bob wants to create an update of his own about the same resource, // signing a message with his private key - if err := request.Sign(falseSigner); err != nil { + if err := request.Sign(bob); err != nil { t.Fatalf("Error signing: %s", err) } @@ -147,29 +156,159 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) { t.Fatalf("Error decoding message:%s", err) } - // Before discovering Bob's misdemeanor, let's see what would happen if we mess + // Before checking what happened with Bob's update, let's see what would happen if we mess // with the signature big time to see if Verify catches it - savedSignature := *recoveredRequest.signature // save the signature for later - binary.LittleEndian.PutUint64(recoveredRequest.signature[5:], 556845463424) // write some random data to break the signature + savedSignature := *recoveredRequest.Signature // save the signature for later + binary.LittleEndian.PutUint64(recoveredRequest.Signature[5:], 556845463424) // write some random data to break the signature if err = recoveredRequest.Verify(); err == nil { t.Fatal("Expected Verify to fail on corrupt signature") } - // restore the Evil Bob's signature from corruption - *recoveredRequest.signature = savedSignature + // restore the Bob's signature from corruption + *recoveredRequest.Signature = savedSignature - // Now the signature is not corrupt, however Verify should now fail because Bob doesn't own the resource - if err = recoveredRequest.Verify(); err == nil { - t.Fatalf("Expected Verify to fail because this resource belongs to Charlie, not Bob the attacker:%s", err) + // Now the signature is not corrupt + if err = recoveredRequest.Verify(); err != nil { + t.Fatal(err) } - // Sign with our friend Charlie's private key - if err := recoveredRequest.Sign(signer); err != nil { + // Reuse object and sign with our friend Charlie's private key + if err := recoveredRequest.Sign(charlie); err != nil { t.Fatalf("Error signing with the correct private key: %s", err) } - // And now, Verify should work since this resource belongs to Charlie + // And now, Verify should work since this update now 
belongs to Charlie if err = recoveredRequest.Verify(); err != nil { - t.Fatalf("Error verifying that Charlie, the good guy, can sign his resource:%s", err) + t.Fatalf("Error verifying that Charlie, can sign a reused request object:%s", err) + } + + // mess with the lookup key to make sure Verify fails: + recoveredRequest.Time = 77999 // this will alter the lookup key + if err = recoveredRequest.Verify(); err == nil { + t.Fatalf("Expected Verify to fail since the lookup key has been altered") + } +} + +func getTestRequest() *Request { + return &Request{ + ResourceUpdate: *getTestResourceUpdate(), + } +} + +func TestUpdateChunkSerializationErrorChecking(t *testing.T) { + + // Test that parseUpdate fails if the chunk is too small + var r Request + if err := r.fromChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1+signatureLength)); err == nil { + t.Fatalf("Expected request.fromChunk to fail when chunkData contains less than %d bytes", minimumUpdateDataLength) + } + + r = *getTestRequest() + + _, err := r.toChunk() + if err == nil { + t.Fatal("Expected request.toChunk to fail when there is no data") + } + r.data = []byte("Al bien hacer jamás le falta premio") // put some arbitrary length data + _, err = r.toChunk() + if err == nil { + t.Fatal("expected request.toChunk to fail when there is no signature") + } + + charlie := newCharlieSigner() + if err := r.Sign(charlie); err != nil { + t.Fatalf("error signing:%s", err) + } + + chunk, err := r.toChunk() + if err != nil { + t.Fatalf("error creating update chunk:%s", err) + } + + compareByteSliceToExpectedHex(t, "chunk", chunk.Data(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019416c206269656e206861636572206a616dc3a173206c652066616c7461207072656d696f5a0ffe0bc27f207cd5b00944c8b9cee93e08b89b5ada777f123ac535189333f174a6a4ca2f43a92c4a477a49d774813c36ce8288552c58e6205b0ac35d0507eb00") + + var recovered Request + recovered.fromChunk(chunk.Address(), chunk.Data()) + if !reflect.DeepEqual(recovered, r) { + t.Fatal("Expected recovered SignedResource update to equal the original one") + } +} + +// check that signature address matches update signer address +func TestReverse(t *testing.T) { + + epoch := lookup.Epoch{ + Time: 7888, + Level: 6, + } + + // make fake timeProvider + timeProvider := &fakeTimeProvider{ + currentTime: startTime.Time, + } + + // signer containing private key + signer := newAliceSigner() + + // set up rpc and create resourcehandler + _, _, teardownTest, err := setupTest(timeProvider, signer) + if err != nil { + t.Fatal(err) + } + defer teardownTest() + + topic, _ := NewTopic("Cervantes quotes", nil) + view := View{ + Topic: topic, + User: signer.Address(), + } + + data := []byte("Donde una puerta se cierra, otra se abre") + + request := new(Request) + request.View = view + request.Epoch = epoch + request.data = data + + // generate a chunk key for this request + key := request.Addr() + + if err = request.Sign(signer); err != nil { + t.Fatal(err) + } + + chunk, err := request.toChunk() + if err != nil { + t.Fatal(err) + } + + // check that we can recover the owner account from the update chunk's signature + var checkUpdate Request + if err := checkUpdate.fromChunk(chunk.Address(), chunk.Data()); err != nil { + t.Fatal(err) + } + checkdigest, err := checkUpdate.GetDigest() + if err != nil { + t.Fatal(err) + } + recoveredaddress, err := getUserAddr(checkdigest, *checkUpdate.Signature) + if err != nil { + t.Fatalf("Retrieve address 
from signature fail: %v", err) + } + originaladdress := crypto.PubkeyToAddress(signer.PrivKey.PublicKey) + + // check that the metadata retrieved from the chunk matches what we gave it + if recoveredaddress != originaladdress { + t.Fatalf("addresses dont match: %x != %x", originaladdress, recoveredaddress) + } + + if !bytes.Equal(key[:], chunk.Address()[:]) { + t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Address()) + } + if epoch != checkUpdate.Epoch { + t.Fatalf("Expected epoch to be '%s', was '%s'", epoch.String(), checkUpdate.Epoch.String()) + } + if !bytes.Equal(data, checkUpdate.data) { + t.Fatalf("Expected data '%x', was '%x'", data, checkUpdate.data) } } diff --git a/swarm/storage/mru/resource.go b/swarm/storage/mru/resource.go deleted file mode 100644 index aa83ff62a..000000000 --- a/swarm/storage/mru/resource.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -package mru - -import ( - "bytes" - "context" - "time" - - "github.com/ethereum/go-ethereum/swarm/storage" -) - -const ( - defaultStoreTimeout = 4000 * time.Millisecond - hasherCount = 8 - resourceHashAlgorithm = storage.SHA3Hash - defaultRetrieveTimeout = 100 * time.Millisecond -) - -// resource caches resource data and the metadata of its root chunk. 
-type resource struct { - resourceUpdate - ResourceMetadata - *bytes.Reader - lastKey storage.Address - updated time.Time -} - -func (r *resource) Context() context.Context { - return context.TODO() -} - -// TODO Expire content after a defined period (to force resync) -func (r *resource) isSynced() bool { - return !r.updated.IsZero() -} - -// implements storage.LazySectionReader -func (r *resource) Size(ctx context.Context, _ chan bool) (int64, error) { - if !r.isSynced() { - return 0, NewError(ErrNotSynced, "Not synced") - } - return int64(len(r.resourceUpdate.data)), nil -} - -//returns the resource's human-readable name -func (r *resource) Name() string { - return r.ResourceMetadata.Name -} - -// Helper function to calculate the next update period number from the current time, start time and frequency -func getNextPeriod(start uint64, current uint64, frequency uint64) (uint32, error) { - if current < start { - return 0, NewErrorf(ErrInvalidValue, "given current time value %d < start time %d", current, start) - } - if frequency == 0 { - return 0, NewError(ErrInvalidValue, "frequency is 0") - } - timeDiff := current - start - period := timeDiff / frequency - return uint32(period + 1), nil -} diff --git a/swarm/storage/mru/resource_sign.go b/swarm/storage/mru/resource_sign.go index a9f7cb629..58196f10e 100644 --- a/swarm/storage/mru/resource_sign.go +++ b/swarm/storage/mru/resource_sign.go @@ -60,7 +60,16 @@ func (s *GenericSigner) Sign(data common.Hash) (signature Signature, err error) return } -// PublicKey returns the public key of the signer's private key +// Address returns the ethereum address of the signer func (s *GenericSigner) Address() common.Address { return s.address } + +// getUserAddr extracts the address of the resource update signer +func getUserAddr(digest common.Hash, signature Signature) (common.Address, error) { + pub, err := crypto.SigToPub(digest.Bytes(), signature[:]) + if err != nil { + return common.Address{}, err + } + return crypto.PubkeyToAddress(*pub), nil +} diff --git a/swarm/storage/mru/resource_test.go b/swarm/storage/mru/resource_test.go deleted file mode 100644 index 0fb465bb0..000000000 --- a/swarm/storage/mru/resource_test.go +++ /dev/null @@ -1,902 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
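getUserAddr, added to resource_sign.go above, is the verification counterpart of GenericSigner.Sign: it recovers the signer's address from nothing but the 32-byte digest and the 65-byte signature. A self-contained sketch of that recovery using go-ethereum's crypto helpers (not part of the diff):

```go
// Sign a digest, then recover the signer's address from the signature alone.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()
	digest := crypto.Keccak256([]byte("some resource update digest")) // 32-byte hash

	sig, err := crypto.Sign(digest, key) // 65-byte [R || S || V] signature
	if err != nil {
		panic(err)
	}

	pub, err := crypto.SigToPub(digest, sig) // recover the public key from the signature
	if err != nil {
		panic(err)
	}
	recovered := crypto.PubkeyToAddress(*pub)
	fmt.Println("addresses match:", recovered == crypto.PubkeyToAddress(key.PublicKey))
}
```

Request.Verify relies on exactly this recovery before comparing the resulting user address against the update's lookup key.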
- -package mru - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/binary" - "flag" - "io/ioutil" - "os" - "testing" - "time" - - "github.com/ethereum/go-ethereum/contracts/ens" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/swarm/chunk" - "github.com/ethereum/go-ethereum/swarm/multihash" - "github.com/ethereum/go-ethereum/swarm/storage" -) - -var ( - loglevel = flag.Int("loglevel", 3, "loglevel") - testHasher = storage.MakeHashFunc(resourceHashAlgorithm)() - startTime = Timestamp{ - Time: uint64(4200), - } - resourceFrequency = uint64(42) - cleanF func() - resourceName = "føø.bar" - hashfunc = storage.MakeHashFunc(storage.DefaultHash) -) - -func init() { - flag.Parse() - log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))) -} - -// simulated timeProvider -type fakeTimeProvider struct { - currentTime uint64 -} - -func (f *fakeTimeProvider) Tick() { - f.currentTime++ -} - -func (f *fakeTimeProvider) Now() Timestamp { - return Timestamp{ - Time: f.currentTime, - } -} - -func TestUpdateChunkSerializationErrorChecking(t *testing.T) { - - // Test that parseUpdate fails if the chunk is too small - var r SignedResourceUpdate - if err := r.fromChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1)); err == nil { - t.Fatalf("Expected parseUpdate to fail when chunkData contains less than %d bytes", minimumUpdateDataLength) - } - - r = SignedResourceUpdate{} - // Test that parseUpdate fails when the length header does not match the data array length - fakeChunk := make([]byte, 150) - binary.LittleEndian.PutUint16(fakeChunk, 44) - if err := r.fromChunk(storage.ZeroAddr, fakeChunk); err == nil { - t.Fatal("Expected parseUpdate to fail when the header length does not match the actual data array passed in") - } - - r = SignedResourceUpdate{ - resourceUpdate: resourceUpdate{ - updateHeader: updateHeader{ - UpdateLookup: UpdateLookup{ - rootAddr: make([]byte, 79), // put the wrong length, should be storage.AddressLength - }, - metaHash: nil, - multihash: false, - }, - }, - } - _, err := r.toChunk() - if err == nil { - t.Fatal("Expected newUpdateChunk to fail when rootAddr or metaHash have the wrong length") - } - r.rootAddr = make([]byte, storage.AddressLength) - r.metaHash = make([]byte, storage.AddressLength) - _, err = r.toChunk() - if err == nil { - t.Fatal("Expected newUpdateChunk to fail when there is no data") - } - r.data = make([]byte, 79) // put some arbitrary length data - _, err = r.toChunk() - if err == nil { - t.Fatal("expected newUpdateChunk to fail when there is no signature", err) - } - - alice := newAliceSigner() - if err := r.Sign(alice); err != nil { - t.Fatalf("error signing:%s", err) - - } - _, err = r.toChunk() - if err != nil { - t.Fatalf("error creating update chunk:%s", err) - } - - r.multihash = true - r.data[1] = 79 // mess with the multihash, corrupting one byte of it. 
- if err := r.Sign(alice); err == nil { - t.Fatal("expected Sign() to fail when an invalid multihash is in data and multihash=true", err) - } -} - -// check that signature address matches update signer address -func TestReverse(t *testing.T) { - - period := uint32(4) - version := uint32(2) - - // make fake timeProvider - timeProvider := &fakeTimeProvider{ - currentTime: startTime.Time, - } - - // signer containing private key - signer := newAliceSigner() - - // set up rpc and create resourcehandler - _, _, teardownTest, err := setupTest(timeProvider, signer) - if err != nil { - t.Fatal(err) - } - defer teardownTest() - - metadata := ResourceMetadata{ - Name: resourceName, - StartTime: startTime, - Frequency: resourceFrequency, - Owner: signer.Address(), - } - - rootAddr, metaHash, _, err := metadata.serializeAndHash() - if err != nil { - t.Fatal(err) - } - - // generate some bogus data for the chunk and sign it - data := make([]byte, 8) - _, err = rand.Read(data) - if err != nil { - t.Fatal(err) - } - testHasher.Reset() - testHasher.Write(data) - - update := &SignedResourceUpdate{ - resourceUpdate: resourceUpdate{ - updateHeader: updateHeader{ - UpdateLookup: UpdateLookup{ - period: period, - version: version, - rootAddr: rootAddr, - }, - metaHash: metaHash, - }, - data: data, - }, - } - // generate a hash for t=4200 version 1 - key := update.UpdateAddr() - - if err = update.Sign(signer); err != nil { - t.Fatal(err) - } - - chunk, err := update.toChunk() - if err != nil { - t.Fatal(err) - } - - // check that we can recover the owner account from the update chunk's signature - var checkUpdate SignedResourceUpdate - if err := checkUpdate.fromChunk(chunk.Address(), chunk.Data()); err != nil { - t.Fatal(err) - } - checkdigest, err := checkUpdate.GetDigest() - if err != nil { - t.Fatal(err) - } - recoveredaddress, err := getOwner(checkdigest, *checkUpdate.signature) - if err != nil { - t.Fatalf("Retrieve address from signature fail: %v", err) - } - originaladdress := crypto.PubkeyToAddress(signer.PrivKey.PublicKey) - - // check that the metadata retrieved from the chunk matches what we gave it - if recoveredaddress != originaladdress { - t.Fatalf("addresses dont match: %x != %x", originaladdress, recoveredaddress) - } - - if !bytes.Equal(key[:], chunk.Address()[:]) { - t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Address()) - } - if period != checkUpdate.period { - t.Fatalf("Expected period '%d', was '%d'", period, checkUpdate.period) - } - if version != checkUpdate.version { - t.Fatalf("Expected version '%d', was '%d'", version, checkUpdate.version) - } - if !bytes.Equal(data, checkUpdate.data) { - t.Fatalf("Expectedn data '%x', was '%x'", data, checkUpdate.data) - } -} - -// make updates and retrieve them based on periods and versions -func TestResourceHandler(t *testing.T) { - - // make fake timeProvider - timeProvider := &fakeTimeProvider{ - currentTime: startTime.Time, - } - - // signer containing private key - signer := newAliceSigner() - - rh, datadir, teardownTest, err := setupTest(timeProvider, signer) - if err != nil { - t.Fatal(err) - } - defer teardownTest() - - // create a new resource - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - metadata := &ResourceMetadata{ - Name: resourceName, - Frequency: resourceFrequency, - StartTime: Timestamp{Time: timeProvider.Now().Time}, - Owner: signer.Address(), - } - - request, err := NewCreateUpdateRequest(metadata) - if err != nil { - t.Fatal(err) - } - request.Sign(signer) - if err != nil { - 
t.Fatal(err) - } - err = rh.New(ctx, request) - if err != nil { - t.Fatal(err) - } - - chunk, err := rh.chunkStore.Get(ctx, storage.Address(request.rootAddr)) - if err != nil { - t.Fatal(err) - } else if len(chunk.Data()) < 16 { - t.Fatalf("chunk data must be minimum 16 bytes, is %d", len(chunk.Data())) - } - - var recoveredMetadata ResourceMetadata - - recoveredMetadata.binaryGet(chunk.Data()) - if err != nil { - t.Fatal(err) - } - if recoveredMetadata.StartTime.Time != timeProvider.currentTime { - t.Fatalf("stored startTime %d does not match provided startTime %d", recoveredMetadata.StartTime.Time, timeProvider.currentTime) - } - if recoveredMetadata.Frequency != resourceFrequency { - t.Fatalf("stored frequency %d does not match provided frequency %d", recoveredMetadata.Frequency, resourceFrequency) - } - - // data for updates: - updates := []string{ - "blinky", - "pinky", - "inky", - "clyde", - } - - // update halfway to first period. period=1, version=1 - resourcekey := make(map[string]storage.Address) - fwdClock(int(resourceFrequency/2), timeProvider) - data := []byte(updates[0]) - request.SetData(data, false) - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - resourcekey[updates[0]], err = rh.Update(ctx, &request.SignedResourceUpdate) - if err != nil { - t.Fatal(err) - } - - // update on first period with version = 1 to make it fail since there is already one update with version=1 - request, err = rh.NewUpdateRequest(ctx, request.rootAddr) - if err != nil { - t.Fatal(err) - } - if request.version != 2 || request.period != 1 { - t.Fatal("Suggested period should be 1 and version should be 2") - } - - request.version = 1 // force version 1 instead of 2 to make it fail - data = []byte(updates[1]) - request.SetData(data, false) - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - resourcekey[updates[1]], err = rh.Update(ctx, &request.SignedResourceUpdate) - if err == nil { - t.Fatal("Expected update to fail since this version already exists") - } - - // update on second period with version = 1, correct. 
period=2, version=1 - fwdClock(int(resourceFrequency/2), timeProvider) - request, err = rh.NewUpdateRequest(ctx, request.rootAddr) - if err != nil { - t.Fatal(err) - } - request.SetData(data, false) - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - resourcekey[updates[1]], err = rh.Update(ctx, &request.SignedResourceUpdate) - if err != nil { - t.Fatal(err) - } - - fwdClock(int(resourceFrequency), timeProvider) - // Update on third period, with version = 1 - request, err = rh.NewUpdateRequest(ctx, request.rootAddr) - if err != nil { - t.Fatal(err) - } - data = []byte(updates[2]) - request.SetData(data, false) - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - resourcekey[updates[2]], err = rh.Update(ctx, &request.SignedResourceUpdate) - if err != nil { - t.Fatal(err) - } - - // update just after third period - fwdClock(1, timeProvider) - request, err = rh.NewUpdateRequest(ctx, request.rootAddr) - if err != nil { - t.Fatal(err) - } - if request.period != 3 || request.version != 2 { - t.Fatal("Suggested period should be 3 and version should be 2") - } - data = []byte(updates[3]) - request.SetData(data, false) - - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - resourcekey[updates[3]], err = rh.Update(ctx, &request.SignedResourceUpdate) - if err != nil { - t.Fatal(err) - } - - time.Sleep(time.Second) - rh.Close() - - // check we can retrieve the updates after close - // it will match on second iteration startTime + (resourceFrequency * 3) - fwdClock(int(resourceFrequency*2)-1, timeProvider) - - rhparams := &HandlerParams{} - - rh2, err := NewTestHandler(datadir, rhparams) - if err != nil { - t.Fatal(err) - } - - rsrc2, err := rh2.Load(context.TODO(), request.rootAddr) - if err != nil { - t.Fatal(err) - } - - _, err = rh2.Lookup(ctx, LookupLatest(request.rootAddr)) - if err != nil { - t.Fatal(err) - } - - // last update should be "clyde", version two, time= startTime + (resourcefrequency * 3) - if !bytes.Equal(rsrc2.data, []byte(updates[len(updates)-1])) { - t.Fatalf("resource data was %v, expected %v", string(rsrc2.data), updates[len(updates)-1]) - } - if rsrc2.version != 2 { - t.Fatalf("resource version was %d, expected 2", rsrc2.version) - } - if rsrc2.period != 3 { - t.Fatalf("resource period was %d, expected 3", rsrc2.period) - } - log.Debug("Latest lookup", "period", rsrc2.period, "version", rsrc2.version, "data", rsrc2.data) - - // specific period, latest version - rsrc, err := rh2.Lookup(ctx, LookupLatestVersionInPeriod(request.rootAddr, 3)) - if err != nil { - t.Fatal(err) - } - // check data - if !bytes.Equal(rsrc.data, []byte(updates[len(updates)-1])) { - t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc2.data), updates[len(updates)-1]) - } - log.Debug("Historical lookup", "period", rsrc2.period, "version", rsrc2.version, "data", rsrc2.data) - - // specific period, specific version - lookupParams := LookupVersion(request.rootAddr, 3, 1) - rsrc, err = rh2.Lookup(ctx, lookupParams) - if err != nil { - t.Fatal(err) - } - // check data - if !bytes.Equal(rsrc.data, []byte(updates[2])) { - t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc2.data), updates[2]) - } - log.Debug("Specific version lookup", "period", rsrc2.period, "version", rsrc2.version, "data", rsrc2.data) - - // we are now at third update - // check backwards stepping to the first - for i := 1; i >= 0; i-- { - rsrc, err := rh2.LookupPrevious(ctx, lookupParams) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(rsrc.data, 
[]byte(updates[i])) { - t.Fatalf("resource data (previous) was %v, expected %v", rsrc.data, updates[i]) - - } - } - - // beyond the first should yield an error - rsrc, err = rh2.LookupPrevious(ctx, lookupParams) - if err == nil { - t.Fatalf("expected previous to fail, returned period %d version %d data %v", rsrc.period, rsrc.version, rsrc.data) - } - -} - -func TestMultihash(t *testing.T) { - - // make fake timeProvider - timeProvider := &fakeTimeProvider{ - currentTime: startTime.Time, - } - - // signer containing private key - signer := newAliceSigner() - - // set up rpc and create resourcehandler - rh, datadir, teardownTest, err := setupTest(timeProvider, signer) - if err != nil { - t.Fatal(err) - } - defer teardownTest() - - // create a new resource - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - metadata := &ResourceMetadata{ - Name: resourceName, - Frequency: resourceFrequency, - StartTime: Timestamp{Time: timeProvider.Now().Time}, - Owner: signer.Address(), - } - - mr, err := NewCreateRequest(metadata) - if err != nil { - t.Fatal(err) - } - err = rh.New(ctx, mr) - if err != nil { - t.Fatal(err) - } - - // we're naïvely assuming keccak256 for swarm hashes - // if it ever changes this test should also change - multihashbytes := ens.EnsNode("foo") - multihashmulti := multihash.ToMultihash(multihashbytes.Bytes()) - if err != nil { - t.Fatal(err) - } - mr.SetData(multihashmulti, true) - mr.Sign(signer) - if err != nil { - t.Fatal(err) - } - multihashkey, err := rh.Update(ctx, &mr.SignedResourceUpdate) - if err != nil { - t.Fatal(err) - } - - sha1bytes := make([]byte, multihash.MultihashLength) - sha1multi := multihash.ToMultihash(sha1bytes) - if err != nil { - t.Fatal(err) - } - mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr) - if err != nil { - t.Fatal(err) - } - mr.SetData(sha1multi, true) - mr.Sign(signer) - if err != nil { - t.Fatal(err) - } - sha1key, err := rh.Update(ctx, &mr.SignedResourceUpdate) - if err != nil { - t.Fatal(err) - } - - // invalid multihashes - mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr) - if err != nil { - t.Fatal(err) - } - mr.SetData(multihashmulti[1:], true) - mr.Sign(signer) - if err != nil { - t.Fatal(err) - } - _, err = rh.Update(ctx, &mr.SignedResourceUpdate) - if err == nil { - t.Fatalf("Expected update to fail with first byte skipped") - } - mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr) - if err != nil { - t.Fatal(err) - } - mr.SetData(multihashmulti[:len(multihashmulti)-2], true) - mr.Sign(signer) - if err != nil { - t.Fatal(err) - } - - _, err = rh.Update(ctx, &mr.SignedResourceUpdate) - if err == nil { - t.Fatalf("Expected update to fail with last byte skipped") - } - - data, err := getUpdateDirect(rh.Handler, multihashkey) - if err != nil { - t.Fatal(err) - } - multihashdecode, err := multihash.FromMultihash(data) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(multihashdecode, multihashbytes.Bytes()) { - t.Fatalf("Decoded hash '%x' does not match original hash '%x'", multihashdecode, multihashbytes.Bytes()) - } - data, err = getUpdateDirect(rh.Handler, sha1key) - if err != nil { - t.Fatal(err) - } - shadecode, err := multihash.FromMultihash(data) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(shadecode, sha1bytes) { - t.Fatalf("Decoded hash '%x' does not match original hash '%x'", shadecode, sha1bytes) - } - rh.Close() - - rhparams := &HandlerParams{} - // test with signed data - rh2, err := NewTestHandler(datadir, rhparams) - if err != nil { - t.Fatal(err) - } - mr, err = 
NewCreateRequest(metadata) - if err != nil { - t.Fatal(err) - } - err = rh2.New(ctx, mr) - if err != nil { - t.Fatal(err) - } - - mr.SetData(multihashmulti, true) - mr.Sign(signer) - - if err != nil { - t.Fatal(err) - } - multihashsignedkey, err := rh2.Update(ctx, &mr.SignedResourceUpdate) - if err != nil { - t.Fatal(err) - } - - mr, err = rh2.NewUpdateRequest(ctx, mr.rootAddr) - if err != nil { - t.Fatal(err) - } - mr.SetData(sha1multi, true) - mr.Sign(signer) - if err != nil { - t.Fatal(err) - } - - sha1signedkey, err := rh2.Update(ctx, &mr.SignedResourceUpdate) - if err != nil { - t.Fatal(err) - } - - data, err = getUpdateDirect(rh2.Handler, multihashsignedkey) - if err != nil { - t.Fatal(err) - } - multihashdecode, err = multihash.FromMultihash(data) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(multihashdecode, multihashbytes.Bytes()) { - t.Fatalf("Decoded hash '%x' does not match original hash '%x'", multihashdecode, multihashbytes.Bytes()) - } - data, err = getUpdateDirect(rh2.Handler, sha1signedkey) - if err != nil { - t.Fatal(err) - } - shadecode, err = multihash.FromMultihash(data) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(shadecode, sha1bytes) { - t.Fatalf("Decoded hash '%x' does not match original hash '%x'", shadecode, sha1bytes) - } -} - -// \TODO verify testing of signature validation and enforcement -func TestValidator(t *testing.T) { - - // make fake timeProvider - timeProvider := &fakeTimeProvider{ - currentTime: startTime.Time, - } - - // signer containing private key. Alice will be the good girl - signer := newAliceSigner() - - // fake signer for false results. Bob will play the bad guy today. - falseSigner := newBobSigner() - - // set up sim timeProvider - rh, _, teardownTest, err := setupTest(timeProvider, signer) - if err != nil { - t.Fatal(err) - } - defer teardownTest() - - // create new resource - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - metadata := &ResourceMetadata{ - Name: resourceName, - Frequency: resourceFrequency, - StartTime: Timestamp{Time: timeProvider.Now().Time}, - Owner: signer.Address(), - } - mr, err := NewCreateRequest(metadata) - if err != nil { - t.Fatal(err) - } - mr.Sign(signer) - - err = rh.New(ctx, mr) - if err != nil { - t.Fatalf("Create resource fail: %v", err) - } - - // chunk with address - data := []byte("foo") - mr.SetData(data, false) - if err := mr.Sign(signer); err != nil { - t.Fatalf("sign fail: %v", err) - } - chunk, err := mr.SignedResourceUpdate.toChunk() - if err != nil { - t.Fatal(err) - } - if !rh.Validate(chunk.Address(), chunk.Data()) { - t.Fatal("Chunk validator fail on update chunk") - } - - // chunk with address made from different publickey - if err := mr.Sign(falseSigner); err == nil { - t.Fatalf("Expected Sign to fail since we are using a different OwnerAddr: %v", err) - } - - // chunk with address made from different publickey - mr.metadata.Owner = zeroAddr // set to zero to bypass .Sign() check - if err := mr.Sign(falseSigner); err != nil { - t.Fatalf("sign fail: %v", err) - } - - chunk, err = mr.SignedResourceUpdate.toChunk() - if err != nil { - t.Fatal(err) - } - - if rh.Validate(chunk.Address(), chunk.Data()) { - t.Fatal("Chunk validator did not fail on update chunk with false address") - } - - ctx, cancel = context.WithTimeout(context.Background(), time.Second) - defer cancel() - - metadata = &ResourceMetadata{ - Name: resourceName, - StartTime: TimestampProvider.Now(), - Frequency: resourceFrequency, - Owner: signer.Address(), - } - chunk, _, err = 
metadata.newChunk() - if err != nil { - t.Fatal(err) - } - - if !rh.Validate(chunk.Address(), chunk.Data()) { - t.Fatal("Chunk validator fail on metadata chunk") - } -} - -// tests that the content address validator correctly checks the data -// tests that resource update chunks are passed through content address validator -// there is some redundancy in this test as it also tests content addressed chunks, -// which should be evaluated as invalid chunks by this validator -func TestValidatorInStore(t *testing.T) { - - // make fake timeProvider - TimestampProvider = &fakeTimeProvider{ - currentTime: startTime.Time, - } - - // signer containing private key - signer := newAliceSigner() - - // set up localstore - datadir, err := ioutil.TempDir("", "storage-testresourcevalidator") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(datadir) - - params := storage.NewDefaultLocalStoreParams() - params.Init(datadir) - store, err := storage.NewLocalStore(params, nil) - if err != nil { - t.Fatal(err) - } - - // set up resource handler and add is as a validator to the localstore - rhParams := &HandlerParams{} - rh := NewHandler(rhParams) - store.Validators = append(store.Validators, rh) - - // create content addressed chunks, one good, one faulty - chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2) - goodChunk := chunks[0] - badChunk := storage.NewChunk(chunks[1].Address(), goodChunk.Data()) - - metadata := &ResourceMetadata{ - StartTime: startTime, - Name: "xyzzy", - Frequency: resourceFrequency, - Owner: signer.Address(), - } - - rootChunk, metaHash, err := metadata.newChunk() - if err != nil { - t.Fatal(err) - } - // create a resource update chunk with correct publickey - updateLookup := UpdateLookup{ - period: 42, - version: 1, - rootAddr: rootChunk.Address(), - } - - updateAddr := updateLookup.UpdateAddr() - data := []byte("bar") - - r := SignedResourceUpdate{ - updateAddr: updateAddr, - resourceUpdate: resourceUpdate{ - updateHeader: updateHeader{ - UpdateLookup: updateLookup, - metaHash: metaHash, - }, - data: data, - }, - } - - r.Sign(signer) - - uglyChunk, err := r.toChunk() - if err != nil { - t.Fatal(err) - } - - // put the chunks in the store and check their error status - err = store.Put(context.Background(), goodChunk) - if err == nil { - t.Fatal("expected error on good content address chunk with resource validator only, but got nil") - } - err = store.Put(context.Background(), badChunk) - if err == nil { - t.Fatal("expected error on bad content address chunk with resource validator only, but got nil") - } - err = store.Put(context.Background(), uglyChunk) - if err != nil { - t.Fatalf("expected no error on resource update chunk with resource validator only, but got: %s", err) - } -} - -// fast-forward clock -func fwdClock(count int, timeProvider *fakeTimeProvider) { - for i := 0; i < count; i++ { - timeProvider.Tick() - } -} - -// create rpc and resourcehandler -func setupTest(timeProvider timestampProvider, signer Signer) (rh *TestHandler, datadir string, teardown func(), err error) { - - var fsClean func() - var rpcClean func() - cleanF = func() { - if fsClean != nil { - fsClean() - } - if rpcClean != nil { - rpcClean() - } - } - - // temp datadir - datadir, err = ioutil.TempDir("", "rh") - if err != nil { - return nil, "", nil, err - } - fsClean = func() { - os.RemoveAll(datadir) - } - - TimestampProvider = timeProvider - rhparams := &HandlerParams{} - rh, err = NewTestHandler(datadir, rhparams) - return rh, datadir, cleanF, err -} - -func newAliceSigner() 
*GenericSigner { - privKey, _ := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") - return NewGenericSigner(privKey) -} - -func newBobSigner() *GenericSigner { - privKey, _ := crypto.HexToECDSA("accedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedecaca") - return NewGenericSigner(privKey) -} - -func newCharlieSigner() *GenericSigner { - privKey, _ := crypto.HexToECDSA("facadefacadefacadefacadefacadefacadefacadefacadefacadefacadefaca") - return NewGenericSigner(privKey) -} - -func getUpdateDirect(rh *Handler, addr storage.Address) ([]byte, error) { - chunk, err := rh.chunkStore.Get(context.TODO(), addr) - if err != nil { - return nil, err - } - var r SignedResourceUpdate - if err := r.fromChunk(addr, chunk.Data()); err != nil { - return nil, err - } - return r.data, nil -} diff --git a/swarm/storage/mru/signedupdate.go b/swarm/storage/mru/signedupdate.go deleted file mode 100644 index 41a5a5e63..000000000 --- a/swarm/storage/mru/signedupdate.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
- -package mru - -import ( - "bytes" - "hash" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/swarm/storage" -) - -// SignedResourceUpdate represents a resource update with all the necessary information to prove ownership of the resource -type SignedResourceUpdate struct { - resourceUpdate // actual content that will be put on the chunk, less signature - signature *Signature - updateAddr storage.Address // resulting chunk address for the update (not serialized, for internal use) - binaryData []byte // resulting serialized data (not serialized, for efficiency/internal use) -} - -// Verify checks that signatures are valid and that the signer owns the resource to be updated -func (r *SignedResourceUpdate) Verify() (err error) { - if len(r.data) == 0 { - return NewError(ErrInvalidValue, "Update does not contain data") - } - if r.signature == nil { - return NewError(ErrInvalidSignature, "Missing signature field") - } - - digest, err := r.GetDigest() - if err != nil { - return err - } - - // get the address of the signer (which also checks that it's a valid signature) - ownerAddr, err := getOwner(digest, *r.signature) - if err != nil { - return err - } - - if !bytes.Equal(r.updateAddr, r.UpdateAddr()) { - return NewError(ErrInvalidSignature, "Signature address does not match with ownerAddr") - } - - // Check if who signed the resource update really owns the resource - if !verifyOwner(ownerAddr, r.metaHash, r.rootAddr) { - return NewErrorf(ErrUnauthorized, "signature is valid but signer does not own the resource: %v", err) - } - - return nil -} - -// Sign executes the signature to validate the resource -func (r *SignedResourceUpdate) Sign(signer Signer) error { - - r.binaryData = nil //invalidate serialized data - digest, err := r.GetDigest() // computes digest and serializes into .binaryData - if err != nil { - return err - } - - signature, err := signer.Sign(digest) - if err != nil { - return err - } - - // Although the Signer interface returns the public address of the signer, - // recover it from the signature to see if they match - ownerAddress, err := getOwner(digest, signature) - if err != nil { - return NewError(ErrInvalidSignature, "Error verifying signature") - } - - if ownerAddress != signer.Address() { // sanity check to make sure the Signer is declaring the same address used to sign! - return NewError(ErrInvalidSignature, "Signer address does not match ownerAddr") - } - - r.signature = &signature - r.updateAddr = r.UpdateAddr() - return nil -} - -// create an update chunk. -func (r *SignedResourceUpdate) toChunk() (storage.Chunk, error) { - - // Check that the update is signed and serialized - // For efficiency, data is serialized during signature and cached in - // the binaryData field when computing the signature digest in .getDigest() - if r.signature == nil || r.binaryData == nil { - return nil, NewError(ErrInvalidSignature, "newUpdateChunk called without a valid signature or payload data. Call .Sign() first.") - } - - resourceUpdateLength := r.resourceUpdate.binaryLength() - // signature is the last item in the chunk data - copy(r.binaryData[resourceUpdateLength:], r.signature[:]) - - chunk := storage.NewChunk(r.updateAddr, r.binaryData) - return chunk, nil -} - -// fromChunk populates this structure from chunk data. It does not verify the signature is valid. 
-func (r *SignedResourceUpdate) fromChunk(updateAddr storage.Address, chunkdata []byte) error { - // for update chunk layout see SignedResourceUpdate definition - - //deserialize the resource update portion - if err := r.resourceUpdate.binaryGet(chunkdata); err != nil { - return err - } - - // Extract the signature - var signature *Signature - cursor := r.resourceUpdate.binaryLength() - sigdata := chunkdata[cursor : cursor+signatureLength] - if len(sigdata) > 0 { - signature = &Signature{} - copy(signature[:], sigdata) - } - - r.signature = signature - r.updateAddr = updateAddr - r.binaryData = chunkdata - - return nil - -} - -// GetDigest creates the resource update digest used in signatures (formerly known as keyDataHash) -// the serialized payload is cached in .binaryData -func (r *SignedResourceUpdate) GetDigest() (result common.Hash, err error) { - hasher := hashPool.Get().(hash.Hash) - defer hashPool.Put(hasher) - hasher.Reset() - dataLength := r.resourceUpdate.binaryLength() - if r.binaryData == nil { - r.binaryData = make([]byte, dataLength+signatureLength) - if err := r.resourceUpdate.binaryPut(r.binaryData[:dataLength]); err != nil { - return result, err - } - } - hasher.Write(r.binaryData[:dataLength]) //everything except the signature. - - return common.BytesToHash(hasher.Sum(nil)), nil -} - -// getOwner extracts the address of the resource update signer -func getOwner(digest common.Hash, signature Signature) (common.Address, error) { - pub, err := crypto.SigToPub(digest.Bytes(), signature[:]) - if err != nil { - return common.Address{}, err - } - return crypto.PubkeyToAddress(*pub), nil -} - -// verifyResourceOwnerhsip checks that the signer of the update actually owns the resource -// H(ownerAddr, metaHash) is computed. If it matches the rootAddr the update chunk is claiming -// to update, it is proven that signer of the resource update owns the resource. 
-// See metadataHash in metadata.go for a more detailed explanation -func verifyOwner(ownerAddr common.Address, metaHash []byte, rootAddr storage.Address) bool { - hasher := hashPool.Get().(hash.Hash) - defer hashPool.Put(hasher) - hasher.Reset() - hasher.Write(metaHash) - hasher.Write(ownerAddr.Bytes()) - rootAddr2 := hasher.Sum(nil) - return bytes.Equal(rootAddr2, rootAddr) -} diff --git a/swarm/storage/mru/timestampprovider.go b/swarm/storage/mru/timestampprovider.go index f483491aa..6ac153213 100644 --- a/swarm/storage/mru/timestampprovider.go +++ b/swarm/storage/mru/timestampprovider.go @@ -18,15 +18,16 @@ package mru import ( "encoding/binary" + "encoding/json" "time" ) // TimestampProvider sets the time source of the mru package var TimestampProvider timestampProvider = NewDefaultTimestampProvider() -// Encodes a point in time as a Unix epoch +// Timestamp encodes a point in time as a Unix epoch type Timestamp struct { - Time uint64 // Unix epoch timestamp, in seconds + Time uint64 `json:"time"` // Unix epoch timestamp, in seconds } // 8 bytes uint64 Time @@ -55,6 +56,18 @@ func (t *Timestamp) binaryPut(data []byte) error { return nil } +// UnmarshalJSON implements the json.Unmarshaller interface +func (t *Timestamp) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &t.Time) +} + +// MarshalJSON implements the json.Marshaller interface +func (t *Timestamp) MarshalJSON() ([]byte, error) { + return json.Marshal(t.Time) +} + +// DefaultTimestampProvider is a TimestampProvider that uses system time +// as time source type DefaultTimestampProvider struct { } diff --git a/swarm/storage/mru/topic.go b/swarm/storage/mru/topic.go new file mode 100644 index 000000000..f318a5593 --- /dev/null +++ b/swarm/storage/mru/topic.go @@ -0,0 +1,105 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package mru + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/ethereum/go-ethereum/common/bitutil" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/swarm/storage" +) + +// TopicLength establishes the max length of a topic string +const TopicLength = storage.AddressLength + +// Topic represents what a resource talks about +type Topic [TopicLength]byte + +// ErrTopicTooLong is returned when creating a topic with a name/related content too long +var ErrTopicTooLong = fmt.Errorf("Topic is too long. Max length is %d", TopicLength) + +// NewTopic creates a new topic from a provided name and "related content" byte array, +// merging the two together. 
+// If relatedContent or name are longer than TopicLength, they will be truncated and an error returned +// name can be an empty string +// relatedContent can be nil +func NewTopic(name string, relatedContent []byte) (topic Topic, err error) { + if relatedContent != nil { + contentLength := len(relatedContent) + if contentLength > TopicLength { + contentLength = TopicLength + err = ErrTopicTooLong + } + copy(topic[:], relatedContent[:contentLength]) + } + nameBytes := []byte(name) + nameLength := len(nameBytes) + if nameLength > TopicLength { + nameLength = TopicLength + err = ErrTopicTooLong + } + bitutil.XORBytes(topic[:], topic[:], nameBytes[:nameLength]) + return topic, err +} + +// Hex will return the topic encoded as an hex string +func (t *Topic) Hex() string { + return hexutil.Encode(t[:]) +} + +// FromHex will parse a hex string into this Topic instance +func (t *Topic) FromHex(hex string) error { + bytes, err := hexutil.Decode(hex) + if err != nil || len(bytes) != len(t) { + return NewErrorf(ErrInvalidValue, "Cannot decode topic") + } + copy(t[:], bytes) + return nil +} + +// Name will try to extract the resource name out of the topic +func (t *Topic) Name(relatedContent []byte) string { + nameBytes := *t + if relatedContent != nil { + contentLength := len(relatedContent) + if contentLength > TopicLength { + contentLength = TopicLength + } + bitutil.XORBytes(nameBytes[:], t[:], relatedContent[:contentLength]) + } + z := bytes.IndexByte(nameBytes[:], 0) + if z < 0 { + z = TopicLength + } + return string(nameBytes[:z]) + +} + +// UnmarshalJSON implements the json.Unmarshaller interface +func (t *Topic) UnmarshalJSON(data []byte) error { + var hex string + json.Unmarshal(data, &hex) + return t.FromHex(hex) +} + +// MarshalJSON implements the json.Marshaller interface +func (t *Topic) MarshalJSON() ([]byte, error) { + return json.Marshal(t.Hex()) +} diff --git a/swarm/storage/mru/topic_test.go b/swarm/storage/mru/topic_test.go new file mode 100644 index 000000000..dad7c7ddc --- /dev/null +++ b/swarm/storage/mru/topic_test.go @@ -0,0 +1,50 @@ +package mru + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +func TestTopic(t *testing.T) { + related, _ := hexutil.Decode("0xabcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789") + topicName := "test-topic" + topic, _ := NewTopic(topicName, related) + hex := topic.Hex() + expectedHex := "0xdfa89c750e3108f9c2aeef0123456789abcdef0123456789abcdef0123456789" + if hex != expectedHex { + t.Fatalf("Expected %s, got %s", expectedHex, hex) + } + + var topic2 Topic + topic2.FromHex(hex) + if topic2 != topic { + t.Fatal("Expected recovered topic to be equal to original one") + } + + if topic2.Name(related) != topicName { + t.Fatal("Retrieved name does not match") + } + + bytes, err := topic2.MarshalJSON() + if err != nil { + t.Fatal(err) + } + expectedJSON := `"0xdfa89c750e3108f9c2aeef0123456789abcdef0123456789abcdef0123456789"` + equal, err := areEqualJSON(expectedJSON, string(bytes)) + if err != nil { + t.Fatal(err) + } + if !equal { + t.Fatalf("Expected JSON to be %s, got %s", expectedJSON, string(bytes)) + } + + err = topic2.UnmarshalJSON(bytes) + if err != nil { + t.Fatal(err) + } + if topic2 != topic { + t.Fatal("Expected recovered topic to be equal to original one") + } + +} diff --git a/swarm/storage/mru/update.go b/swarm/storage/mru/update.go index d1bd37ddf..6aa57fce1 100644 --- a/swarm/storage/mru/update.go +++ b/swarm/storage/mru/update.go @@ -17,36 +17,35 @@ package mru import ( - 
"encoding/binary" - "errors" + "fmt" + "strconv" "github.com/ethereum/go-ethereum/swarm/chunk" - "github.com/ethereum/go-ethereum/swarm/log" - "github.com/ethereum/go-ethereum/swarm/multihash" ) -// resourceUpdate encapsulates the information sent as part of a resource update -type resourceUpdate struct { - updateHeader // metainformationa about this resource update - data []byte // actual data payload +// ProtocolVersion defines the current version of the protocol that will be included in each update message +const ProtocolVersion uint8 = 0 + +const headerLength = 8 + +// Header defines a update message header including a protocol version byte +type Header struct { + Version uint8 // Protocol version + Padding [headerLength - 1]uint8 // reserved for future use } -// Update chunk layout -// Prefix: -// 2 bytes updateHeaderLength -// 2 bytes data length -const chunkPrefixLength = 2 + 2 +// ResourceUpdate encapsulates the information sent as part of a resource update +type ResourceUpdate struct { + Header Header // + ID // Resource update identifying information + data []byte // actual data payload +} -// Header: (see updateHeader) -// Data: -// data (datalength bytes) -// -// Minimum size is Header + 1 (minimum data length, enforced) -const minimumUpdateDataLength = updateHeaderLength + 1 -const maxUpdateDataLength = chunk.DefaultSize - signatureLength - updateHeaderLength - chunkPrefixLength +const minimumUpdateDataLength = idLength + headerLength + 1 +const maxUpdateDataLength = chunk.DefaultSize - signatureLength - idLength - headerLength // binaryPut serializes the resource update information into the given slice -func (r *resourceUpdate) binaryPut(serializedData []byte) error { +func (r *ResourceUpdate) binaryPut(serializedData []byte) error { datalength := len(r.data) if datalength == 0 { return NewError(ErrInvalidValue, "cannot update a resource with no data") @@ -60,26 +59,17 @@ func (r *resourceUpdate) binaryPut(serializedData []byte) error { return NewErrorf(ErrInvalidValue, "slice passed to putBinary must be of exact size. 
Expected %d bytes", r.binaryLength()) } - if r.multihash { - if _, _, err := multihash.GetMultihashLength(r.data); err != nil { - return NewError(ErrInvalidValue, "Invalid multihash") - } - } - - // Add prefix: updateHeaderLength and actual data length - cursor := 0 - binary.LittleEndian.PutUint16(serializedData[cursor:], uint16(updateHeaderLength)) - cursor += 2 + var cursor int + // serialize Header + serializedData[cursor] = r.Header.Version + copy(serializedData[cursor+1:headerLength], r.Header.Padding[:headerLength-1]) + cursor += headerLength - // data length - binary.LittleEndian.PutUint16(serializedData[cursor:], uint16(datalength)) - cursor += 2 - - // serialize header (see updateHeader) - if err := r.updateHeader.binaryPut(serializedData[cursor : cursor+updateHeaderLength]); err != nil { + // serialize ID + if err := r.ID.binaryPut(serializedData[cursor : cursor+idLength]); err != nil { return err } - cursor += updateHeaderLength + cursor += idLength // add the data copy(serializedData[cursor:], r.data) @@ -89,60 +79,54 @@ func (r *resourceUpdate) binaryPut(serializedData []byte) error { } // binaryLength returns the expected number of bytes this structure will take to encode -func (r *resourceUpdate) binaryLength() int { - return chunkPrefixLength + updateHeaderLength + len(r.data) +func (r *ResourceUpdate) binaryLength() int { + return idLength + headerLength + len(r.data) } // binaryGet populates this instance from the information contained in the passed byte slice -func (r *resourceUpdate) binaryGet(serializedData []byte) error { +func (r *ResourceUpdate) binaryGet(serializedData []byte) error { if len(serializedData) < minimumUpdateDataLength { return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a resource update chunk", minimumUpdateDataLength) } - cursor := 0 - declaredHeaderlength := binary.LittleEndian.Uint16(serializedData[cursor : cursor+2]) - if declaredHeaderlength != updateHeaderLength { - return NewErrorf(ErrCorruptData, "Invalid header length. 
Expected %d, got %d", updateHeaderLength, declaredHeaderlength) - } + dataLength := len(serializedData) - idLength - headerLength + // at this point we can be satisfied that we have the correct data length to read - cursor += 2 - datalength := int(binary.LittleEndian.Uint16(serializedData[cursor : cursor+2])) - cursor += 2 + var cursor int - if chunkPrefixLength+updateHeaderLength+datalength+signatureLength != len(serializedData) { - return NewError(ErrNothingToReturn, "length specified in header is different than actual chunk size") - } + // deserialize Header + r.Header.Version = serializedData[cursor] // extract the protocol version + copy(r.Header.Padding[:headerLength-1], serializedData[cursor+1:headerLength]) // extract the padding + cursor += headerLength - // at this point we can be satisfied that we have the correct data length to read - if err := r.updateHeader.binaryGet(serializedData[cursor : cursor+updateHeaderLength]); err != nil { + if err := r.ID.binaryGet(serializedData[cursor : cursor+idLength]); err != nil { return err } - cursor += updateHeaderLength - - data := serializedData[cursor : cursor+datalength] - cursor += datalength + cursor += idLength - // if multihash content is indicated we check the validity of the multihash - if r.updateHeader.multihash { - mhLength, mhHeaderLength, err := multihash.GetMultihashLength(data) - if err != nil { - log.Error("multihash parse error", "err", err) - return err - } - if datalength != mhLength+mhHeaderLength { - log.Debug("multihash error", "datalength", datalength, "mhLength", mhLength, "mhHeaderLength", mhHeaderLength) - return errors.New("Corrupt multihash data") - } - } + data := serializedData[cursor : cursor+dataLength] + cursor += dataLength // now that all checks have passed, copy data into structure - r.data = make([]byte, datalength) + r.data = make([]byte, dataLength) copy(r.data, data) return nil } -// Multihash specifies whether the resource data should be interpreted as multihash -func (r *resourceUpdate) Multihash() bool { - return r.multihash +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (r *ResourceUpdate) FromValues(values Values, data []byte) error { + r.data = data + version, _ := strconv.ParseUint(values.Get("protocolVersion"), 10, 32) + r.Header.Version = uint8(version) + return r.ID.FromValues(values) +} + +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (r *ResourceUpdate) AppendValues(values Values) []byte { + r.ID.AppendValues(values) + values.Set("protocolVersion", fmt.Sprintf("%d", r.Header.Version)) + return r.data } diff --git a/swarm/storage/mru/update_test.go b/swarm/storage/mru/update_test.go index 51e9d2fcc..bd706d83a 100644 --- a/swarm/storage/mru/update_test.go +++ b/swarm/storage/mru/update_test.go @@ -1,72 +1,50 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + package mru import ( - "bytes" "testing" ) -const serializedUpdateHex = "0x490034004f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf000456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f" -const serializedUpdateMultihashHex = "0x490022004f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf0011b200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1c1e1f20" - -func getTestResourceUpdate() *resourceUpdate { - return &resourceUpdate{ - updateHeader: *getTestUpdateHeader(false), - data: []byte("El que lee mucho y anda mucho, ve mucho y sabe mucho"), - } -} - -func getTestResourceUpdateMultihash() *resourceUpdate { - return &resourceUpdate{ - updateHeader: *getTestUpdateHeader(true), - data: []byte{0x1b, 0x20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 28, 30, 31, 32}, +func getTestResourceUpdate() *ResourceUpdate { + return &ResourceUpdate{ + ID: *getTestID(), + data: []byte("El que lee mucho y anda mucho, ve mucho y sabe mucho"), } } -func compareResourceUpdate(a, b *resourceUpdate) bool { - return compareUpdateHeader(&a.updateHeader, &b.updateHeader) && - bytes.Equal(a.data, b.data) -} - func TestResourceUpdateSerializer(t *testing.T) { - var serializedUpdateLength = len(serializedUpdateHex)/2 - 1 // hack to calculate the byte length out of the hex representation - update := getTestResourceUpdate() - serializedUpdate := make([]byte, serializedUpdateLength) - if err := update.binaryPut(serializedUpdate); err != nil { - t.Fatal(err) - } - compareByteSliceToExpectedHex(t, "serializedUpdate", serializedUpdate, serializedUpdateHex) - - // Test fail if update does not contain data - update.data = nil - if err := update.binaryPut(serializedUpdate); err == nil { - t.Fatal("Expected resourceUpdate.binaryPut to fail since update does not contain data") - } + testBinarySerializerRecovery(t, getTestResourceUpdate(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f") +} +func TestResourceUpdateLengthCheck(t *testing.T) { + testBinarySerializerLengthCheck(t, getTestResourceUpdate()) // Test fail if update is too big - update.data = make([]byte, 10000) - if err := update.binaryPut(serializedUpdate); err == nil { + update := getTestResourceUpdate() + update.data = make([]byte, maxUpdateDataLength+100) + serialized := make([]byte, update.binaryLength()) + if err := update.binaryPut(serialized); err == nil { t.Fatal("Expected resourceUpdate.binaryPut to fail since update is too big") } - // Test fail if passed slice is not of the exact size required for this update - update.data = make([]byte, 1) - if err := update.binaryPut(serializedUpdate); err == nil { - t.Fatal("Expected resourceUpdate.binaryPut to fail since passed slice is not of the appropriate size") - } - - // Test serializing a multihash update - var serializedUpdateMultihashLength = len(serializedUpdateMultihashHex)/2 - 1 // hack to calculate the byte length out of the hex representation - update 
= getTestResourceUpdateMultihash() - serializedUpdate = make([]byte, serializedUpdateMultihashLength) - if err := update.binaryPut(serializedUpdate); err != nil { - t.Fatal(err) - } - compareByteSliceToExpectedHex(t, "serializedUpdate", serializedUpdate, serializedUpdateMultihashHex) - - // mess with the multihash to test it fails with a wrong multihash error - update.data[1] = 79 - if err := update.binaryPut(serializedUpdate); err == nil { - t.Fatal("Expected resourceUpdate.binaryPut to fail since data contains an invalid multihash") + // test fail if data is empty or nil + update.data = nil + serialized = make([]byte, update.binaryLength()) + if err := update.binaryPut(serialized); err == nil { + t.Fatal("Expected resourceUpdate.binaryPut to fail since data is empty") } - } diff --git a/swarm/storage/mru/updateheader.go b/swarm/storage/mru/updateheader.go deleted file mode 100644 index f0039eaf6..000000000 --- a/swarm/storage/mru/updateheader.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -package mru - -import ( - "github.com/ethereum/go-ethereum/swarm/storage" -) - -// updateHeader models the non-payload components of a Resource Update -type updateHeader struct { - UpdateLookup // UpdateLookup contains the information required to locate this resource (components of the search key used to find it) - multihash bool // Whether the data in this Resource Update should be interpreted as multihash - metaHash []byte // SHA3 hash of the metadata chunk (less ownerAddr). Used to prove ownerhsip of the resource. -} - -const metaHashLength = storage.AddressLength - -// updateLookupLength bytes -// 1 byte flags (multihash bool for now) -// 32 bytes metaHash -const updateHeaderLength = updateLookupLength + 1 + metaHashLength - -// binaryPut serializes the resource header information into the given slice -func (h *updateHeader) binaryPut(serializedData []byte) error { - if len(serializedData) != updateHeaderLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize updateHeaderLength. 
Expected %d, got %d", updateHeaderLength, len(serializedData)) - } - if len(h.metaHash) != metaHashLength { - return NewError(ErrInvalidValue, "updateHeader.binaryPut called without metaHash set") - } - if err := h.UpdateLookup.binaryPut(serializedData[:updateLookupLength]); err != nil { - return err - } - cursor := updateLookupLength - copy(serializedData[cursor:], h.metaHash[:metaHashLength]) - cursor += metaHashLength - - var flags byte - if h.multihash { - flags |= 0x01 - } - - serializedData[cursor] = flags - cursor++ - - return nil -} - -// binaryLength returns the expected size of this structure when serialized -func (h *updateHeader) binaryLength() int { - return updateHeaderLength -} - -// binaryGet restores the current updateHeader instance from the information contained in the passed slice -func (h *updateHeader) binaryGet(serializedData []byte) error { - if len(serializedData) != updateHeaderLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to read updateHeaderLength. Expected %d, got %d", updateHeaderLength, len(serializedData)) - } - - if err := h.UpdateLookup.binaryGet(serializedData[:updateLookupLength]); err != nil { - return err - } - cursor := updateLookupLength - h.metaHash = make([]byte, metaHashLength) - copy(h.metaHash[:storage.AddressLength], serializedData[cursor:cursor+storage.AddressLength]) - cursor += metaHashLength - - flags := serializedData[cursor] - cursor++ - - h.multihash = flags&0x01 != 0 - - return nil -} diff --git a/swarm/storage/mru/updateheader_test.go b/swarm/storage/mru/updateheader_test.go deleted file mode 100644 index b1f505989..000000000 --- a/swarm/storage/mru/updateheader_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package mru - -import ( - "bytes" - "testing" - - "github.com/ethereum/go-ethereum/common/hexutil" -) - -const serializedUpdateHeaderMultihashHex = "0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf001" - -func getTestUpdateHeader(multihash bool) (header *updateHeader) { - _, metaHash, _, _ := getTestMetadata().serializeAndHash() - return &updateHeader{ - UpdateLookup: *getTestUpdateLookup(), - multihash: multihash, - metaHash: metaHash, - } -} - -func compareUpdateHeader(a, b *updateHeader) bool { - return compareUpdateLookup(&a.UpdateLookup, &b.UpdateLookup) && - a.multihash == b.multihash && - bytes.Equal(a.metaHash, b.metaHash) -} - -func TestUpdateHeaderSerializer(t *testing.T) { - header := getTestUpdateHeader(true) - serializedHeader := make([]byte, updateHeaderLength) - if err := header.binaryPut(serializedHeader); err != nil { - t.Fatal(err) - } - compareByteSliceToExpectedHex(t, "serializedHeader", serializedHeader, serializedUpdateHeaderMultihashHex) - - // trigger incorrect slice length error passing a slice that is 1 byte too big - if err := header.binaryPut(make([]byte, updateHeaderLength+1)); err == nil { - t.Fatal("Expected updateHeader.binaryPut to fail since supplied slice is of incorrect length") - } - - // trigger invalid metaHash error - header.metaHash = nil - if err := header.binaryPut(serializedHeader); err == nil { - t.Fatal("Expected updateHeader.binaryPut to fail metaHash is of incorrect length") - } -} - -func TestUpdateHeaderDeserializer(t *testing.T) { - originalUpdate := getTestUpdateHeader(true) - serializedData, _ := hexutil.Decode(serializedUpdateHeaderMultihashHex) - var retrievedUpdate updateHeader - if err := retrievedUpdate.binaryGet(serializedData); err != nil { - t.Fatal(err) - } - if 
!compareUpdateHeader(originalUpdate, &retrievedUpdate) { - t.Fatalf("Expected deserialized structure to equal the original") - } - - // mess with source slice to test length checks - serializedData = []byte{1, 2, 3} - if err := retrievedUpdate.binaryGet(serializedData); err == nil { - t.Fatal("Expected retrievedUpdate.binaryGet, since passed slice is too small") - } - -} diff --git a/swarm/storage/mru/view.go b/swarm/storage/mru/view.go new file mode 100644 index 000000000..2e4ce4a0b --- /dev/null +++ b/swarm/storage/mru/view.go @@ -0,0 +1,125 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package mru + +import ( + "hash" + "unsafe" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/swarm/storage" +) + +// View represents a particular user's view of a resource +type View struct { + Topic Topic `json:"topic"` + User common.Address `json:"user"` +} + +// View layout: +// TopicLength bytes +// userAddr common.AddressLength bytes +const viewLength = TopicLength + common.AddressLength + +// mapKey calculates a unique id for this view for the cache map in `Handler` +func (u *View) mapKey() uint64 { + serializedData := make([]byte, viewLength) + u.binaryPut(serializedData) + hasher := hashPool.Get().(hash.Hash) + defer hashPool.Put(hasher) + hasher.Reset() + hasher.Write(serializedData) + hash := hasher.Sum(nil) + return *(*uint64)(unsafe.Pointer(&hash[0])) +} + +// binaryPut serializes this View instance into the provided slice +func (u *View) binaryPut(serializedData []byte) error { + if len(serializedData) != viewLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize View. Expected %d, got %d", viewLength, len(serializedData)) + } + var cursor int + copy(serializedData[cursor:cursor+TopicLength], u.Topic[:TopicLength]) + cursor += TopicLength + + copy(serializedData[cursor:cursor+common.AddressLength], u.User[:]) + cursor += common.AddressLength + + return nil +} + +// binaryLength returns the expected size of this structure when serialized +func (u *View) binaryLength() int { + return viewLength +} + +// binaryGet restores the current instance from the information contained in the passed slice +func (u *View) binaryGet(serializedData []byte) error { + if len(serializedData) != viewLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to read View. 
Expected %d, got %d", viewLength, len(serializedData)) + } + + var cursor int + copy(u.Topic[:], serializedData[cursor:cursor+TopicLength]) + cursor += TopicLength + + copy(u.User[:], serializedData[cursor:cursor+common.AddressLength]) + cursor += common.AddressLength + + return nil +} + +// Hex serializes the View to a hex string +func (u *View) Hex() string { + serializedData := make([]byte, viewLength) + u.binaryPut(serializedData) + return hexutil.Encode(serializedData) +} + +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (u *View) FromValues(values Values) (err error) { + topic := values.Get("topic") + if topic != "" { + if err := u.Topic.FromHex(values.Get("topic")); err != nil { + return err + } + } else { // see if the user set name and relatedcontent + name := values.Get("name") + relatedContent, _ := hexutil.Decode(values.Get("relatedcontent")) + if len(relatedContent) > 0 { + if len(relatedContent) < storage.AddressLength { + return NewErrorf(ErrInvalidValue, "relatedcontent field must be a hex-encoded byte array exactly %d bytes long", storage.AddressLength) + } + relatedContent = relatedContent[:storage.AddressLength] + } + u.Topic, err = NewTopic(name, relatedContent) + if err != nil { + return err + } + } + u.User = common.HexToAddress(values.Get("user")) + return nil +} + +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (u *View) AppendValues(values Values) { + values.Set("topic", u.Topic.Hex()) + values.Set("user", u.User.Hex()) +} diff --git a/swarm/storage/mru/view_test.go b/swarm/storage/mru/view_test.go new file mode 100644 index 000000000..45720ba79 --- /dev/null +++ b/swarm/storage/mru/view_test.go @@ -0,0 +1,36 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
+package mru + +import ( + "testing" +) + +func getTestView() *View { + topic, _ := NewTopic("world news report, every hour", nil) + return &View{ + Topic: topic, + User: newCharlieSigner().Address(), + } +} + +func TestViewSerializerDeserializer(t *testing.T) { + testBinarySerializerRecovery(t, getTestView(), "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781c") +} + +func TestMetadataSerializerLengthCheck(t *testing.T) { + testBinarySerializerLengthCheck(t, getTestView()) +} diff --git a/swarm/swarm.go b/swarm/swarm.go index 8b2661529..0cd56d4eb 100644 --- a/swarm/swarm.go +++ b/swarm/swarm.go @@ -180,6 +180,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e DoSync: config.SyncEnabled, DoRetrieve: true, SyncUpdateDelay: config.SyncUpdateDelay, + MaxPeerServers: config.MaxStreamPeerServers, }) // Swarm Hash Merklised Chunking for Arbitrary-length Document/File storage diff --git a/swarm/testutil/http.go b/swarm/testutil/http.go index 074823032..2309c39f0 100644 --- a/swarm/testutil/http.go +++ b/swarm/testutil/http.go @@ -32,19 +32,6 @@ type TestServer interface { ServeHTTP(http.ResponseWriter, *http.Request) } -// simulated timeProvider -type fakeTimeProvider struct { - currentTime uint64 -} - -func (f *fakeTimeProvider) Tick() { - f.currentTime++ -} - -func (f *fakeTimeProvider) Now() mru.Timestamp { - return mru.Timestamp{Time: f.currentTime} -} - func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, resolver api.Resolver) *TestSwarmServer { dir, err := ioutil.TempDir("", "swarm-storage-test") if err != nil { @@ -67,10 +54,6 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, reso t.Fatal(err) } - fakeTimeProvider := &fakeTimeProvider{ - currentTime: 42, - } - mru.TimestampProvider = fakeTimeProvider rhparams := &mru.HandlerParams{} rh, err := mru.NewTestHandler(resourceDir, rhparams) if err != nil { @@ -79,34 +62,36 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, reso a := api.NewAPI(fileStore, resolver, rh.Handler, nil) srv := httptest.NewServer(serverFunc(a)) - return &TestSwarmServer{ - Server: srv, - FileStore: fileStore, - dir: dir, - Hasher: storage.MakeHashFunc(storage.DefaultHash)(), - timestampProvider: fakeTimeProvider, + tss := &TestSwarmServer{ + Server: srv, + FileStore: fileStore, + dir: dir, + Hasher: storage.MakeHashFunc(storage.DefaultHash)(), cleanup: func() { srv.Close() rh.Close() os.RemoveAll(dir) os.RemoveAll(resourceDir) }, + CurrentTime: 42, } + mru.TimestampProvider = tss + return tss } type TestSwarmServer struct { *httptest.Server - Hasher storage.SwarmHash - FileStore *storage.FileStore - dir string - cleanup func() - timestampProvider *fakeTimeProvider + Hasher storage.SwarmHash + FileStore *storage.FileStore + dir string + cleanup func() + CurrentTime uint64 } func (t *TestSwarmServer) Close() { t.cleanup() } -func (t *TestSwarmServer) GetCurrentTime() mru.Timestamp { - return t.timestampProvider.Now() +func (t *TestSwarmServer) Now() mru.Timestamp { + return mru.Timestamp{Time: t.CurrentTime} } diff --git a/tests/block_test.go b/tests/block_test.go index c91119929..8315728a6 100644 --- a/tests/block_test.go +++ b/tests/block_test.go @@ -32,9 +32,13 @@ func TestBlockchain(t *testing.T) { bt.skipLoad(`^bcTotalDifficultyTest/(lotsOfLeafs|lotsOfBranches|sideChainWithMoreTransactions)`) // This test is broken bt.fails(`blockhashNonConstArg_Constantinople`, "Broken test") - - // Still failing 
tests - // bt.skipLoad(`^bcWalletTest.*_Byzantium$`) + // Slow tests + bt.slow(`^bcExploitTest/DelegateCallSpam.json`) + bt.slow(`^bcExploitTest/ShanghaiLove.json`) + bt.slow(`^bcExploitTest/SuicideIssue.json`) + bt.slow(`^bcForkStressTest/`) + bt.slow(`^bcGasPricerTest/RPC_API_Test.json`) + bt.slow(`^bcWalletTest/`) bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) { if err := bt.checkFailure(t, name, test.Run()); err != nil { diff --git a/tests/init_test.go b/tests/init_test.go index 90a74448a..053cbd6fc 100644 --- a/tests/init_test.go +++ b/tests/init_test.go @@ -25,6 +25,7 @@ import ( "path/filepath" "reflect" "regexp" + "runtime" "sort" "strings" "testing" @@ -90,7 +91,7 @@ type testMatcher struct { configpat []testConfig failpat []testFailure skiploadpat []*regexp.Regexp - skipshortpat []*regexp.Regexp + slowpat []*regexp.Regexp whitelistpat *regexp.Regexp } @@ -105,8 +106,8 @@ type testFailure struct { } // skipShortMode skips tests matching when the -short flag is used. -func (tm *testMatcher) skipShortMode(pattern string) { - tm.skipshortpat = append(tm.skipshortpat, regexp.MustCompile(pattern)) +func (tm *testMatcher) slow(pattern string) { + tm.slowpat = append(tm.slowpat, regexp.MustCompile(pattern)) } // skipLoad skips JSON loading of tests matching the pattern. @@ -133,11 +134,15 @@ func (tm *testMatcher) config(pattern string, cfg params.ChainConfig) { // findSkip matches name against test skip patterns. func (tm *testMatcher) findSkip(name string) (reason string, skipload bool) { - if testing.Short() { - for _, re := range tm.skipshortpat { - if re.MatchString(name) { + isWin32 := runtime.GOARCH == "386" && runtime.GOOS == "windows" + for _, re := range tm.slowpat { + if re.MatchString(name) { + if testing.Short() { return "skipped in -short mode", false } + if isWin32 { + return "skipped on 32bit windows", false + } } } for _, re := range tm.skiploadpat { diff --git a/tests/state_test.go b/tests/state_test.go index 91c9a9f44..c52e9abb8 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -30,7 +30,15 @@ func TestState(t *testing.T) { st := new(testMatcher) // Long tests: - st.skipShortMode(`^stQuadraticComplexityTest/`) + st.slow(`^stAttackTest/ContractCreationSpam`) + st.slow(`^stBadOpcode/badOpcodes`) + st.slow(`^stPreCompiledContracts/modexp`) + st.slow(`^stQuadraticComplexityTest/`) + st.slow(`^stStaticCall/static_Call50000`) + st.slow(`^stStaticCall/static_Return50000`) + st.slow(`^stStaticCall/static_Call1MB`) + st.slow(`^stSystemOperationsTest/CallRecursiveBomb`) + st.slow(`^stTransactionTest/Opcodes_TransactionInit`) // Broken tests: st.skipLoad(`^stTransactionTest/OverflowGasRequire\.json`) // gasLimit > 256 bits st.skipLoad(`^stTransactionTest/zeroSigTransa[^/]*\.json`) // EIP-86 is not supported yet diff --git a/tests/vm_test.go b/tests/vm_test.go index c9f5e225e..441483dff 100644 --- a/tests/vm_test.go +++ b/tests/vm_test.go @@ -25,13 +25,9 @@ import ( func TestVM(t *testing.T) { t.Parallel() vmt := new(testMatcher) + vmt.slow("^vmPerformance") vmt.fails("^vmSystemOperationsTest.json/createNameRegistrator$", "fails without parallel execution") - vmt.skipLoad(`^vmInputLimits(Light)?.json`) // log format broken - - vmt.skipShortMode("^vmPerformanceTest.json") - vmt.skipShortMode("^vmInputLimits(Light)?.json") - vmt.walk(t, vmTestDir, func(t *testing.T, name string, test *VMTest) { withTrace(t, test.json.Exec.GasLimit, func(vmconfig vm.Config) error { return vmt.checkFailure(t, name, test.Run(vmconfig)) diff --git 
a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md index e84226a73..56729a92c 100644 --- a/vendor/github.com/mattn/go-colorable/README.md +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -1,5 +1,10 @@ # go-colorable +[](http://godoc.org/github.com/mattn/go-colorable) +[](https://travis-ci.org/mattn/go-colorable) +[](https://coveralls.io/github/mattn/go-colorable?branch=master) +[](https://goreportcard.com/report/mattn/go-colorable) + Colorable writer for windows. For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go index a7fe19a8c..887f203dc 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_others.go +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -1,10 +1,13 @@ // +build !windows +// +build !appengine package colorable import ( "io" "os" + + _ "github.com/mattn/go-isatty" ) // NewColorable return new instance of Writer which handle escape sequence. diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go index 628ad904e..404e10ca0 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -1,3 +1,6 @@ +// +build windows +// +build !appengine + package colorable import ( @@ -26,6 +29,15 @@ const ( backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) ) +const ( + genericRead = 0x80000000 + genericWrite = 0x40000000 +) + +const ( + consoleTextmodeBuffer = 0x1 +) + type wchar uint16 type short int16 type dword uint32 @@ -65,14 +77,18 @@ var ( procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") + procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") ) +// Writer provide colorable Writer to the console type Writer struct { - out io.Writer - handle syscall.Handle - lastbuf bytes.Buffer - oldattr word - oldpos coord + out io.Writer + handle syscall.Handle + althandle syscall.Handle + oldattr word + oldpos coord + rest bytes.Buffer } // NewColorable return new instance of Writer which handle escape sequence from File. @@ -86,9 +102,8 @@ func NewColorable(file *os.File) io.Writer { handle := syscall.Handle(file.Fd()) procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} - } else { - return file } + return file } // NewColorableStdout return new instance of Writer which handle escape sequence for stdout. 
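For context on the vendored package being updated here, a small usage sketch of the colorable Writer, assuming the package-level `NewColorableStdout` constructor referenced above:

```golang
package main

import (
	"fmt"

	"github.com/mattn/go-colorable"
)

func main() {
	// On Windows the returned Writer translates ANSI escape sequences such as
	// the color codes below into console attribute calls; on other platforms
	// it simply passes the bytes through to os.Stdout.
	out := colorable.NewColorableStdout()
	fmt.Fprintln(out, "\x1b[32mgreen\x1b[0m and \x1b[1;31mbold red\x1b[0m text")
}
```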
@@ -360,20 +375,65 @@ var color256 = map[int]int{ 255: 0xeeeeee, } +// `\033]0;TITLESTR\007` +func doTitleSequence(er *bytes.Reader) error { + var c byte + var err error + + c, err = er.ReadByte() + if err != nil { + return err + } + if c != '0' && c != '2' { + return nil + } + c, err = er.ReadByte() + if err != nil { + return err + } + if c != ';' { + return nil + } + title := make([]byte, 0, 80) + for { + c, err = er.ReadByte() + if err != nil { + return err + } + if c == 0x07 || c == '\n' { + break + } + title = append(title, c) + } + if len(title) > 0 { + title8, err := syscall.UTF16PtrFromString(string(title)) + if err == nil { + procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) + } + } + return nil +} + // Write write data on console func (w *Writer) Write(data []byte) (n int, err error) { var csbi consoleScreenBufferInfo procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - er := bytes.NewReader(data) + handle := w.handle + + var er *bytes.Reader + if w.rest.Len() > 0 { + var rest bytes.Buffer + w.rest.WriteTo(&rest) + w.rest.Reset() + rest.Write(data) + er = bytes.NewReader(rest.Bytes()) + } else { + er = bytes.NewReader(data) + } var bw [1]byte loop: for { - r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - if r1 == 0 { - break loop - } - c1, err := er.ReadByte() if err != nil { break loop @@ -385,155 +445,202 @@ loop: } c2, err := er.ReadByte() if err != nil { - w.lastbuf.WriteByte(c1) break loop } - if c2 != 0x5b { - w.lastbuf.WriteByte(c1) - w.lastbuf.WriteByte(c2) + + switch c2 { + case '>': + continue + case ']': + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { + break loop + } + er = bytes.NewReader(w.rest.Bytes()[2:]) + err := doTitleSequence(er) + if err != nil { + break loop + } + w.rest.Reset() + continue + // https://github.com/mattn/go-colorable/issues/27 + case '7': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + continue + case '8': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + continue + case 0x5b: + // execute part after switch + default: continue } + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + var buf bytes.Buffer var m byte - for { - c, err := er.ReadByte() - if err != nil { - w.lastbuf.WriteByte(c1) - w.lastbuf.WriteByte(c2) - w.lastbuf.Write(buf.Bytes()) - break loop - } + for i, c := range w.rest.Bytes()[2:] { if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { m = c + er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) + w.rest.Reset() break } buf.Write([]byte(string(c))) } + if m == 0 { + break loop + } - var csbi consoleScreenBufferInfo switch m { case 'A': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.y -= short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'B': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), 
uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.y += short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'C': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x -= short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'D': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - if n, err = strconv.Atoi(buf.String()); err == nil { - var csbi consoleScreenBufferInfo - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x += short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + if csbi.cursorPosition.x < 0 { + csbi.cursorPosition.x = 0 } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'E': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x = 0 csbi.cursorPosition.y += short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'F': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x = 0 csbi.cursorPosition.y -= short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'G': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x = short(n - 1) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'H': - token := strings.Split(buf.String(), ";") - if len(token) != 2 { - continue - } - n1, err := strconv.Atoi(token[0]) - if err != nil { - continue - } - n2, err := strconv.Atoi(token[1]) - if err != nil { - continue + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H', 'f': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = 
short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 } - csbi.cursorPosition.x = short(n2 - 1) - csbi.cursorPosition.y = short(n1 - 1) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'J': - n, err := strconv.Atoi(buf.String()) - if err != nil { - continue + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + var count, written dword var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) switch n { case 0: cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) case 1: cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) case 2: cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) } - var count, written dword - count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) - procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) case 'K': - n, err := strconv.Atoi(buf.String()) - if err != nil { - continue + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) var cursor coord + var count, written dword switch n { case 0: cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) case 1: - cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) case 2: - cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) } - var count, written dword - count = dword(csbi.size.x - csbi.cursorPosition.x) - procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - 
procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) case 'm': - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) attr := csbi.attributes cs := buf.String() if cs == "" { - procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) continue } token := strings.Split(cs, ";") @@ -547,7 +654,7 @@ loop: attr |= foregroundIntensity case n == 7: attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) - case 22 == n || n == 25 || n == 25: + case n == 22 || n == 25: attr |= foregroundIntensity case n == 27: attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) @@ -572,6 +679,21 @@ loop: attr |= n256foreAttr[n256] i += 2 } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= foregroundRed + } + if g > 127 { + attr |= foregroundGreen + } + if b > 127 { + attr |= foregroundBlue + } } else { attr = attr & (w.oldattr & backgroundMask) } @@ -599,6 +721,21 @@ loop: attr |= n256backAttr[n256] i += 2 } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= backgroundRed + } + if g > 127 { + attr |= backgroundGreen + } + if b > 127 { + attr |= backgroundBlue + } } else { attr = attr & (w.oldattr & foregroundMask) } @@ -630,33 +767,56 @@ loop: attr |= backgroundBlue } } - procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) } } case 'h': + var ci consoleCursorInfo cs := buf.String() - if cs == "?25" { - var ci consoleCursorInfo - procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) ci.visible = 1 - procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle == 0 { + h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) + w.althandle = syscall.Handle(h) + if w.althandle != 0 { + handle = w.althandle + } + } } case 'l': + var ci consoleCursorInfo cs := buf.String() - if cs == "?25" { - var ci consoleCursorInfo - procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + 
procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) ci.visible = 0 - procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle != 0 { + syscall.CloseHandle(w.althandle) + w.althandle = 0 + handle = w.handle + } } case 's': - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) w.oldpos = csbi.cursorPosition case 'u': - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) } } - return len(data) - w.lastbuf.Len(), nil + + return len(data), nil } type consoleColor struct { @@ -700,22 +860,22 @@ func (c consoleColor) backgroundAttr() (attr word) { } var color16 = []consoleColor{ - consoleColor{0x000000, false, false, false, false}, - consoleColor{0x000080, false, false, true, false}, - consoleColor{0x008000, false, true, false, false}, - consoleColor{0x008080, false, true, true, false}, - consoleColor{0x800000, true, false, false, false}, - consoleColor{0x800080, true, false, true, false}, - consoleColor{0x808000, true, true, false, false}, - consoleColor{0xc0c0c0, true, true, true, false}, - consoleColor{0x808080, false, false, false, true}, - consoleColor{0x0000ff, false, false, true, true}, - consoleColor{0x00ff00, false, true, false, true}, - consoleColor{0x00ffff, false, true, true, true}, - consoleColor{0xff0000, true, false, false, true}, - consoleColor{0xff00ff, true, false, true, true}, - consoleColor{0xffff00, true, true, false, true}, - consoleColor{0xffffff, true, true, true, true}, + {0x000000, false, false, false, false}, + {0x000080, false, false, true, false}, + {0x008000, false, true, false, false}, + {0x008080, false, true, true, false}, + {0x800000, true, false, false, false}, + {0x800080, true, false, true, false}, + {0x808000, true, true, false, false}, + {0xc0c0c0, true, true, true, false}, + {0x808080, false, false, false, true}, + {0x0000ff, false, false, true, true}, + {0x00ff00, false, true, false, true}, + {0x00ffff, false, true, true, true}, + {0xff0000, true, false, false, true}, + {0xff00ff, true, false, true, true}, + {0xffff00, true, true, false, true}, + {0xffffff, true, true, true, true}, } type hsv struct { diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go index ca588c78a..9721e16f4 100644 --- a/vendor/github.com/mattn/go-colorable/noncolorable.go +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -7,8 +7,7 @@ import ( // NonColorable hold writer but remove escape sequence. type NonColorable struct { - out io.Writer - lastbuf bytes.Buffer + out io.Writer } // NewNonColorable return new instance of Writer which remove escape sequence from Writer. 
@@ -33,12 +32,9 @@ loop: } c2, err := er.ReadByte() if err != nil { - w.lastbuf.WriteByte(c1) break loop } if c2 != 0x5b { - w.lastbuf.WriteByte(c1) - w.lastbuf.WriteByte(c2) continue } @@ -46,9 +42,6 @@ loop: for { c, err := er.ReadByte() if err != nil { - w.lastbuf.WriteByte(c1) - w.lastbuf.WriteByte(c2) - w.lastbuf.Write(buf.Bytes()) break loop } if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { @@ -57,5 +50,6 @@ loop: buf.Write([]byte(string(c))) } } - return len(data) - w.lastbuf.Len(), nil + + return len(data), nil } diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md index 8e4365f45..1e69004bb 100644 --- a/vendor/github.com/mattn/go-isatty/README.md +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -1,6 +1,9 @@ # go-isatty -[](https://travis-ci.org/mattn/go-isatty) [](https://coveralls.io/github/mattn/go-isatty?branch=master) +[](http://godoc.org/github.com/mattn/go-isatty) +[](https://travis-ci.org/mattn/go-isatty) +[](https://coveralls.io/github/mattn/go-isatty?branch=master) +[](https://goreportcard.com/report/mattn/go-isatty) isatty for golang diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go deleted file mode 100644 index 83c588773..000000000 --- a/vendor/github.com/mattn/go-isatty/isatty_appengine.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build appengine - -package isatty - -// IsTerminal returns true if the file descriptor is terminal which -// is always false on on appengine classic which is a sandboxed PaaS. -func IsTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go index 42f2514d1..07e93039d 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -16,3 +16,9 @@ func IsTerminal(fd uintptr) bool { _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) return err == 0 } + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go index 9d24bac1d..1f4002617 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_linux.go +++ b/vendor/github.com/mattn/go-isatty/isatty_linux.go @@ -1,5 +1,5 @@ // +build linux -// +build !appengine +// +build !appengine,!ppc64,!ppc64le package isatty @@ -16,3 +16,9 @@ func IsTerminal(fd uintptr) bool { _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) return err == 0 } + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_not_windows.go b/vendor/github.com/mattn/go-isatty/isatty_not_windows.go deleted file mode 100644 index 616832d23..000000000 --- a/vendor/github.com/mattn/go-isatty/isatty_not_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !windows appengine - -package isatty - -// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. 
-func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go index 1f0c6bf53..bdd5c79a0 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_solaris.go +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -14,3 +14,9 @@ func IsTerminal(fd uintptr) bool { err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) return err == nil } + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go index 2164497ad..82568a1bb 100644 --- a/vendor/github.com/mattn/go-runewidth/runewidth.go +++ b/vendor/github.com/mattn/go-runewidth/runewidth.go @@ -1,13 +1,24 @@ package runewidth +import "os" + var ( // EastAsianWidth will be set true if the current locale is CJK - EastAsianWidth = IsEastAsian() + EastAsianWidth bool // DefaultCondition is a condition in current locale DefaultCondition = &Condition{EastAsianWidth} ) +func init() { + env := os.Getenv("RUNEWIDTH_EASTASIAN") + if env == "" { + EastAsianWidth = IsEastAsian() + } else { + EastAsianWidth = env == "1" + } +} + type interval struct { first rune last rune @@ -55,6 +66,7 @@ var private = table{ var nonprint = table{ {0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD}, {0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F}, + {0x2028, 0x2029}, {0x202A, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF}, {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF}, } diff --git a/vendor/vendor.json b/vendor/vendor.json index 6e5610460..1307525c3 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -268,22 +268,23 @@ "revisionTime": "2016-07-20T14:16:34Z" }, { - "checksumSHA1": "I4njd26dG5hxFT2nawuByM4pxzY=", + "checksumSHA1": "SEnjvwVyfuU2xBaOfXfwPD5MZqk=", "path": "github.com/mattn/go-colorable", - "revision": "5411d3eea5978e6cdc258b30de592b60df6aba96", - "revisionTime": "2017-02-10T17:28:01Z" + "revision": "efa589957cd060542a26d2dd7832fd6a6c6c3ade", + "revisionTime": "2018-03-10T13:32:14Z", + "version": "efa589957cd060542a26d2dd7832fd6a6c6c3ade" }, { - "checksumSHA1": "EkT5JmFvz3zOPWappEFyYWUaeY0=", + "checksumSHA1": "GiVgQkx5acnq+JZtYiuHPlhHoso=", "path": "github.com/mattn/go-isatty", - "revision": "281032e84ae07510239465db46bf442aa44b953a", - "revisionTime": "2017-02-09T17:56:15Z" + "revision": "3fb116b820352b7f0c281308a4d6250c22d94e27", + "revisionTime": "2018-08-30T10:17:45Z" }, { "checksumSHA1": "MNkKJyk2TazKMJYbal5wFHybpyA=", "path": "github.com/mattn/go-runewidth", - "revision": "14207d285c6c197daabb5c9793d63e7af9ab2d50", - "revisionTime": "2017-02-01T02:35:40Z" + "revision": "ce7b0b5c7b45a81508558cd1dba6bb1e4ddb51bb", + "revisionTime": "2018-04-08T05:53:51Z" }, { "checksumSHA1": "L3leymg2RT8hFl5uL+5KP/LpBkg=", |
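
For reference, the truecolor handling added to the vendored colorable_windows.go above reduces a 24-bit SGR color (`ESC[38;2;R;G;Bm` for the foreground, `48;2;...` for the background) to a basic console attribute by thresholding each channel at 127. A minimal standalone sketch of that mapping follows; the constant names are stand-ins for this sketch (the vendored code ORs the Windows console attribute bits), not part of the diff itself:

```go
package main

import "fmt"

// Stand-in constants for the console color bits the vendored code ORs
// together (FOREGROUND_BLUE/GREEN/RED in the Windows console API).
const (
	foregroundBlue  = 0x1
	foregroundGreen = 0x2
	foregroundRed   = 0x4
)

// approxForeground mirrors the approach taken for 24-bit SGR sequences:
// each RGB channel is thresholded at 127 and mapped onto one of the
// basic console color bits.
func approxForeground(r, g, b int) (attr int) {
	if r > 127 {
		attr |= foregroundRed
	}
	if g > 127 {
		attr |= foregroundGreen
	}
	if b > 127 {
		attr |= foregroundBlue
	}
	return attr
}

func main() {
	// ESC[38;2;255;128;0m (orange) collapses to red|green on a 16-color console.
	fmt.Printf("attr = %03b\n", approxForeground(255, 128, 0))
}
```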