Mirror of https://github.com/matrix-org/dendrite.git
commit 9fd1cd7a37 (parent 41c79e6086)

    Init go mod & update vendor

    Signed-off-by: Benedikt Bongartz <benne@klimlive.de>
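
The go.mod, go.sum and vendor tree below are the kind of output produced by the module tooling introduced in Go 1.11. The exact commands are not recorded in the commit, but would typically be along the lines of:

    go mod init github.com/matrix-org/dendrite
    go mod vendor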

go.mod (new file, 65 lines)
@@ -0,0 +1,65 @@
module github.com/matrix-org/dendrite

require (
	github.com/Shopify/sarama v0.0.0-20170127151855-574d3147eee3
	github.com/alecthomas/gometalinter v2.0.2+incompatible // indirect
	github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf // indirect
	github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7 // indirect
	github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a // indirect
	github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
	github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b // indirect
	github.com/davecgh/go-spew v1.1.0 // indirect
	github.com/eapache/go-resiliency v0.0.0-20160104191539-b86b1ec0dd42 // indirect
	github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934 // indirect
	github.com/eapache/queue v1.1.0 // indirect
	github.com/golang/protobuf v0.0.0-20161117033126-8ee79997227b // indirect
	github.com/golang/snappy v0.0.0-20170119014723-7db9049039a0 // indirect
	github.com/google/shlex v0.0.0-20150127133951-6f45313302b9 // indirect
	github.com/gorilla/context v1.1.1 // indirect
	github.com/gorilla/mux v1.3.0
	github.com/jaegertracing/jaeger-client-go v0.0.0-20170921145708-3ad49a1d839b // indirect
	github.com/jaegertracing/jaeger-lib v0.0.0-20170920222118-21a3da6d66fe // indirect
	github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 // indirect
	github.com/lib/pq v0.0.0-20170918175043-23da1db4f16d
	github.com/matrix-org/dugong v0.0.0-20171220115018-ea0a4690a0d5
	github.com/matrix-org/gomatrix v0.0.0-20171003113848-a7fc80c8060c
	github.com/matrix-org/gomatrixserverlib v0.0.0-20181109104322-1c2cbc0872f0
	github.com/matrix-org/naffka v0.0.0-20171115094957-662bfd0841d0
	github.com/matrix-org/util v0.0.0-20171013132526-8b1c8ab81986
	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
	github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5
	github.com/nicksnyder/go-i18n v1.8.1 // indirect
	github.com/opentracing/opentracing-go v0.0.0-20170806192116-8ebe5d4e236e
	github.com/pelletier/go-toml v0.0.0-20170904195809-1d6b12b7cb29 // indirect
	github.com/pierrec/lz4 v0.0.0-20161206202305-5c9560bfa9ac // indirect
	github.com/pierrec/xxHash v0.0.0-20160112165351-5a004441f897 // indirect
	github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/prometheus/client_golang v0.0.0-20180519192340-c51dc758d4bb
	github.com/prometheus/client_model v0.0.0-20150212101744-fa8ad6fec335 // indirect
	github.com/prometheus/common v0.0.0-20170108231212-dd2f054febf4 // indirect
	github.com/prometheus/procfs v0.0.0-20170128160123-1878d9fbb537 // indirect
	github.com/rcrowley/go-metrics v0.0.0-20161128210544-1f30fe9094a5 // indirect
	github.com/sirupsen/logrus v0.0.0-20170822132746-89742aefa4b2
	github.com/stretchr/testify v0.0.0-20170809224252-890a5c3458b4 // indirect
	github.com/tidwall/gjson v1.0.2 // indirect
	github.com/tidwall/match v1.0.0 // indirect
	github.com/tidwall/sjson v1.0.0 // indirect
	github.com/uber-go/atomic v1.3.0 // indirect
	github.com/uber/jaeger-client-go v2.16.0+incompatible
	github.com/uber/jaeger-lib v2.0.0+incompatible
	github.com/uber/tchannel-go v0.0.0-20170927010734-b3e26487e291 // indirect
	go.uber.org/atomic v1.3.0 // indirect
	go.uber.org/multierr v0.0.0-20170829224307-fb7d312c2c04 // indirect
	go.uber.org/zap v1.7.1 // indirect
	golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd
	golang.org/x/net v0.0.0-20170927055102-0a9397675ba3 // indirect
	golang.org/x/sys v0.0.0-20171012164349-43eea11bc926 // indirect
	gopkg.in/Shopify/sarama.v1 v1.11.0
	gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
	gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20170727041045-23bcc3c4eae3 // indirect
	gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
	gopkg.in/h2non/bimg.v1 v1.0.18
	gopkg.in/macaroon.v2 v2.0.0 // indirect
	gopkg.in/yaml.v2 v2.0.0-20171116090243-287cf08546ab
)

go.sum (new file, 101 lines)
@@ -0,0 +1,101 @@
github.com/Shopify/sarama v0.0.0-20170127151855-574d3147eee3 h1:j6BAEHYn1kUyW2j7kY0mOJ/R8A0qWwXpvUAEHGemm/g=
github.com/Shopify/sarama v0.0.0-20170127151855-574d3147eee3/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/alecthomas/gometalinter v2.0.2+incompatible/go.mod h1:qfIpQGGz3d+NmgyPBqv+LSh50emm1pt72EtcX2vKYQk=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b/go.mod h1:v9FBN7gdVTpiD/+LZ7Po0UKvROyT87uLVxTHVky/dlQ=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/eapache/go-resiliency v0.0.0-20160104191539-b86b1ec0dd42 h1:f8ERmXYuaC+kCSv2w+y3rBK/oVu6If4DEm3jywJJ0hc=
github.com/eapache/go-resiliency v0.0.0-20160104191539-b86b1ec0dd42/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934 h1:oGLoaVIefp3tiOgi7+KInR/nNPvEpPM6GFo+El7fd14=
github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/golang/protobuf v0.0.0-20161117033126-8ee79997227b h1:fE/yi9pibxGEc0gSJuEShcsBXE2d5FW3OudsjE9tKzQ=
github.com/golang/protobuf v0.0.0-20161117033126-8ee79997227b/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20170119014723-7db9049039a0 h1:FMElzTwkd/2jQ2QzLEzt97JRgvFhYhnYiaQSwZ7tuyU=
github.com/golang/snappy v0.0.0-20170119014723-7db9049039a0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE=
github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.3.0 h1:HwSEKGN6U5T2aAQTfu5pW8fiwjSp3IgwdRbkICydk/c=
github.com/gorilla/mux v1.3.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/jaegertracing/jaeger-client-go v0.0.0-20170921145708-3ad49a1d839b/go.mod h1:HWG7INeOG1ZE17I/S8eeb+svquXmBS/hf1Obi6hJUyQ=
github.com/jaegertracing/jaeger-lib v0.0.0-20170920222118-21a3da6d66fe/go.mod h1:VqeqQrZmZr9G4WdLw4ei9tAHU54iJRkfoFHvTTQn4jQ=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/lib/pq v0.0.0-20170918175043-23da1db4f16d h1:Hdtccv31GWxWoCzWsIhZXy5NxEktzAkA8lywhTKu8O4=
github.com/lib/pq v0.0.0-20170918175043-23da1db4f16d/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/matrix-org/dugong v0.0.0-20171220115018-ea0a4690a0d5 h1:nMX2t7hbGF0NYDYySx0pCqEKGKAeZIiSqlWSspetlhY=
github.com/matrix-org/dugong v0.0.0-20171220115018-ea0a4690a0d5/go.mod h1:NgPCr+UavRGH6n5jmdX8DuqFZ4JiCWIJoZiuhTRLSUg=
github.com/matrix-org/gomatrix v0.0.0-20171003113848-a7fc80c8060c h1:aZap604NyBGhAUE0CyNHz6+Pryye5A5mHnYyO4KPPW8=
github.com/matrix-org/gomatrix v0.0.0-20171003113848-a7fc80c8060c/go.mod h1:3fxX6gUjWyI/2Bt7J1OLhpCzOfO/bB3AiX0cJtEKud0=
github.com/matrix-org/gomatrixserverlib v0.0.0-20181109104322-1c2cbc0872f0 h1:3UzhmERBbis4ZaB3imEbZwtDjGz/oVRC2cLLEajCzJA=
github.com/matrix-org/gomatrixserverlib v0.0.0-20181109104322-1c2cbc0872f0/go.mod h1:YHyhIQUmuXyKtoVfDUMk/DyU93Taamlu6nPZkij/JtA=
github.com/matrix-org/naffka v0.0.0-20171115094957-662bfd0841d0 h1:p7WTwG+aXM86+yVrYAiCMW3ZHSmotVvuRbjtt3jC+4A=
github.com/matrix-org/naffka v0.0.0-20171115094957-662bfd0841d0/go.mod h1:cXoYQIENbdWIQHt1SyCo6Bl3C3raHwJ0wgVrXHSqf+A=
github.com/matrix-org/util v0.0.0-20171013132526-8b1c8ab81986 h1:TiWl4hLvezAhRPM8tPcPDFTysZ7k4T/1J4GPp/iqlZo=
github.com/matrix-org/util v0.0.0-20171013132526-8b1c8ab81986/go.mod h1:lePuOiXLNDott7NZfnQvJk0lAZ5HgvIuWGhel6J+RLA=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5 h1:BvoENQQU+fZ9uukda/RzCAL/191HHwJA5b13R6diVlY=
github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
github.com/nicksnyder/go-i18n v1.8.1/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q=
github.com/opentracing/opentracing-go v0.0.0-20170806192116-8ebe5d4e236e h1:4cOVGAdR+woaUwhk6bgWI9ESJQDTaJMr8U4OJlT3J0Q=
github.com/opentracing/opentracing-go v0.0.0-20170806192116-8ebe5d4e236e/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pelletier/go-toml v0.0.0-20170904195809-1d6b12b7cb29/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pierrec/lz4 v0.0.0-20161206202305-5c9560bfa9ac h1:tKcxwAA5OHUQjL6sWsuCIcP9OnzN+RwKfvomtIOsfy8=
github.com/pierrec/lz4 v0.0.0-20161206202305-5c9560bfa9ac/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/xxHash v0.0.0-20160112165351-5a004441f897 h1:jp3jc/PyyTrTKjJJ6rWnhTbmo7tGgBFyG9AL5FIrO1I=
github.com/pierrec/xxHash v0.0.0-20160112165351-5a004441f897/go.mod h1:w2waW5Zoa/Wc4Yqe0wgrIYAGKqRMf7czn2HNKXmuL+I=
github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17 h1:chPfVn+gpAM5CTpTyVU9j8J+xgRGwmoDlNDLjKnJiYo=
github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.0.0-20180519192340-c51dc758d4bb h1:ghXIh3jvLRo/h3y2O7wBgcmH1th5NXQ4XHwK5CgI//I=
github.com/prometheus/client_golang v0.0.0-20180519192340-c51dc758d4bb/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_model v0.0.0-20150212101744-fa8ad6fec335 h1:0E/5GnGmzoDCtmzTycjGDWW33H0UBmAhR0h+FC8hWLs=
github.com/prometheus/client_model v0.0.0-20150212101744-fa8ad6fec335/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20170108231212-dd2f054febf4 h1:bZG2YNnM/Fjd3kiqaVt13Apkhzz24wBKlxQ+URiggXk=
github.com/prometheus/common v0.0.0-20170108231212-dd2f054febf4/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20170128160123-1878d9fbb537 h1:Lp8kgy0YtdO1HXuD85DBJ0xGMbfoiVWRMDyKSOo7v9g=
github.com/prometheus/procfs v0.0.0-20170128160123-1878d9fbb537/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/rcrowley/go-metrics v0.0.0-20161128210544-1f30fe9094a5 h1:gwcdIpH6NU2iF8CmcqD+CP6+1CkRBOhHaPR+iu6raBY=
github.com/rcrowley/go-metrics v0.0.0-20161128210544-1f30fe9094a5/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/sirupsen/logrus v0.0.0-20170822132746-89742aefa4b2 h1:+8J/sCAVv2Y9Ct1BKszDFJEVWv6Aynr2O4FYGUg6+Mc=
github.com/sirupsen/logrus v0.0.0-20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/stretchr/testify v0.0.0-20170809224252-890a5c3458b4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/tidwall/gjson v1.0.2 h1:5BsM7kyEAHAUGEGDkEKO9Mdyiuw6QQ6TSDdarP0Nnmk=
github.com/tidwall/gjson v1.0.2/go.mod h1:c/nTNbUr0E0OrXEhq1pwa8iEgc2DOt4ZZqAt1HtCkPA=
github.com/tidwall/match v1.0.0 h1:Ym1EcFkp+UQ4ptxfWlW+iMdq5cPH5nEuGzdf/Pb7VmI=
github.com/tidwall/match v1.0.0/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
github.com/tidwall/sjson v1.0.0 h1:hOrzQPtGKlKAudQVmU43GkxEgG8TOgKyiKUyb7sE0rs=
github.com/tidwall/sjson v1.0.0/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y=
github.com/uber-go/atomic v1.3.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
github.com/uber/jaeger-client-go v2.16.0+incompatible h1:Q2Pp6v3QYiocMxomCaJuwQGFt7E53bPYqEgug/AoBtY=
github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.0.0+incompatible h1:iMSCV0rmXEogjNWPh2D0xk9YVKvrtGoHJNe9ebLu/pw=
github.com/uber/jaeger-lib v2.0.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/uber/tchannel-go v0.0.0-20170927010734-b3e26487e291/go.mod h1:Rrgz1eL8kMjW/nEzZos0t+Heq0O4LhnUJVA32OvWKHo=
go.uber.org/atomic v1.3.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v0.0.0-20170829224307-fb7d312c2c04/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.7.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd h1:VtIkGDhk0ph3t+THbvXHfMZ8QHgsBO39Nh52+74pq7w=
golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/net v0.0.0-20170927055102-0a9397675ba3 h1:tTDpczhDVjW6WN3DinzKcw5juwkDTVn22I7MNlfxSXM=
golang.org/x/net v0.0.0-20170927055102-0a9397675ba3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sys v0.0.0-20171012164349-43eea11bc926 h1:PY6OU86NqbyZiOzaPnDw6oOjAGtYQqIua16z6y9QkwE=
golang.org/x/sys v0.0.0-20171012164349-43eea11bc926/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
gopkg.in/Shopify/sarama.v1 v1.11.0 h1:/3kaCyeYaPbr59IBjeqhIcUOB1vXlIVqXAYa5g5C5F0=
gopkg.in/Shopify/sarama.v1 v1.11.0/go.mod h1:AxnvoaevB2nBjNK17cG61A3LleFcWFwVBHBt+cot4Oc=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20170727041045-23bcc3c4eae3/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/h2non/bimg.v1 v1.0.18 h1:qn6/RpBHt+7WQqoBcK+aF2puc6nC78eZj5LexxoalT4=
gopkg.in/h2non/bimg.v1 v1.0.18/go.mod h1:PgsZL7dLwUbsGm1NYps320GxGgvQNTnecMCZqxV11So=
gopkg.in/macaroon.v2 v2.0.0/go.mod h1:+I6LnTMkm/uV5ew/0nsulNjL16SK4+C8yDmRUzHR17I=
gopkg.in/yaml.v2 v2.0.0-20171116090243-287cf08546ab h1:yZ6iByf7GKeJ3gsd1Dr/xaj1DyJ//wxKX1Cdh8LhoAw=
gopkg.in/yaml.v2 v2.0.0-20171116090243-287cf08546ab/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
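
Each dependency in go.sum carries up to two checksums: an `h1:` hash over the module's full file tree, and a `/go.mod` hash over just its go.mod file (present alone for modules only needed during module-graph resolution). `go mod verify` re-checks downloaded modules against these entries.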

vendor/github.com/Shopify/sarama/CHANGELOG.md (generated, vendored, new file, 355 lines)
@@ -0,0 +1,355 @@
# Changelog

#### Version 1.11.0 (2016-12-20)

_Important:_ As of Sarama 1.11 it is necessary to set the config value of
`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
versions would silently override this value when instantiating a SyncProducer,
which led to unexpected values and data races.
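
For illustration, a minimal sketch of the new requirement (assuming a broker at `localhost:9092`):

```go
config := sarama.NewConfig()
config.Producer.Return.Successes = true // now required to construct a SyncProducer
producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
if err != nil {
	panic(err)
}
defer producer.Close()
```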

New Features:
- Metrics! Thanks to Sébastien Launay for all his work on this feature
  ([#701](https://github.com/Shopify/sarama/pull/701),
  [#746](https://github.com/Shopify/sarama/pull/746),
  [#766](https://github.com/Shopify/sarama/pull/766)).
- Add support for LZ4 compression
  ([#786](https://github.com/Shopify/sarama/pull/786)).
- Add support for ListOffsetRequest v1 and Kafka 0.10.1
  ([#775](https://github.com/Shopify/sarama/pull/775)).
- Added a `HighWaterMarks` method to the Consumer which aggregates the
  `HighWaterMarkOffset` values of its child topic/partitions
  ([#769](https://github.com/Shopify/sarama/pull/769)).

Bug Fixes:
- Fixed producing when using timestamps, compression and Kafka 0.10
  ([#759](https://github.com/Shopify/sarama/pull/759)).
- Added missing decoder methods to DescribeGroups response
  ([#756](https://github.com/Shopify/sarama/pull/756)).
- Fix producer shutdown when `Return.Errors` is disabled
  ([#787](https://github.com/Shopify/sarama/pull/787)).
- Don't mutate configuration in SyncProducer
  ([#790](https://github.com/Shopify/sarama/pull/790)).
- Fix crash on SASL initialization failure
  ([#795](https://github.com/Shopify/sarama/pull/795)).

#### Version 1.10.1 (2016-08-30)

Bug Fixes:
- Fix the documentation for `HashPartitioner` which was incorrect
  ([#717](https://github.com/Shopify/sarama/pull/717)).
- Permit client creation even when it is limited by ACLs
  ([#722](https://github.com/Shopify/sarama/pull/722)).
- Several fixes to the consumer timer optimization code, regressions introduced
  in v1.10.0. Go's timers are finicky
  ([#730](https://github.com/Shopify/sarama/pull/730),
  [#733](https://github.com/Shopify/sarama/pull/733),
  [#734](https://github.com/Shopify/sarama/pull/734)).
- Handle consuming compressed relative offsets with Kafka 0.10
  ([#735](https://github.com/Shopify/sarama/pull/735)).

#### Version 1.10.0 (2016-08-02)

_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
Kafka you are running against (via the `config.Version` value) in order to use
features that may not be compatible with old Kafka versions. If you don't
specify this value it will default to 0.8.2 (the minimum supported), and trying
to use more recent features (like the offset manager) will fail with an error.
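
For illustration, a sketch of declaring the broker version using one of the predefined `sarama.KafkaVersion` constants:

```go
config := sarama.NewConfig()
config.Version = sarama.V0_10_0_0 // enables 0.10-only features; defaults to 0.8.2 otherwise
```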

_Also:_ The offset-manager's behaviour has been changed to match the upstream
java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and
[#713](https://github.com/Shopify/sarama/pull/713)). If you use the
offset-manager, please ensure that you are committing one *greater* than the
last consumed message offset or else you may end up consuming duplicate
messages.
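
For illustration, a sketch of the commit convention (here `pom` is an assumed `sarama.PartitionOffsetManager` and `msg` the last message processed):

```go
// Commit the offset of the *next* message to read, matching the
// upstream Java consumer's convention.
pom.MarkOffset(msg.Offset+1, "")
```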

New Features:
- Support for Kafka 0.10
  ([#672](https://github.com/Shopify/sarama/pull/672),
  [#678](https://github.com/Shopify/sarama/pull/678),
  [#681](https://github.com/Shopify/sarama/pull/681), and others).
- Support for configuring the target Kafka version
  ([#676](https://github.com/Shopify/sarama/pull/676)).
- Batch producing support in the SyncProducer
  ([#677](https://github.com/Shopify/sarama/pull/677)).
- Extend producer mock to allow setting expectations on message contents
  ([#667](https://github.com/Shopify/sarama/pull/667)).

Improvements:
- Support `nil` compressed messages for deleting in compacted topics
  ([#634](https://github.com/Shopify/sarama/pull/634)).
- Pre-allocate decoding errors, greatly reducing heap usage and GC time against
  misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)).
- Re-use consumer expiry timers, removing one allocation per consumed message
  ([#707](https://github.com/Shopify/sarama/pull/707)).

Bug Fixes:
- Actually default the client ID to "sarama" like we say we do
  ([#664](https://github.com/Shopify/sarama/pull/664)).
- Fix a rare issue where `Client.Leader` could return the wrong error
  ([#685](https://github.com/Shopify/sarama/pull/685)).
- Fix a possible tight loop in the consumer
  ([#693](https://github.com/Shopify/sarama/pull/693)).
- Match upstream's offset-tracking behaviour
  ([#705](https://github.com/Shopify/sarama/pull/705)).
- Report UnknownTopicOrPartition errors from the offset manager
  ([#706](https://github.com/Shopify/sarama/pull/706)).
- Fix possible negative partition value from the HashPartitioner
  ([#709](https://github.com/Shopify/sarama/pull/709)).

#### Version 1.9.0 (2016-05-16)

New Features:
- Add support for custom offset manager retention durations
  ([#602](https://github.com/Shopify/sarama/pull/602)).
- Publish low-level mocks to enable testing of third-party producer/consumer
  implementations ([#570](https://github.com/Shopify/sarama/pull/570)).
- Declare support for Golang 1.6
  ([#611](https://github.com/Shopify/sarama/pull/611)).
- Support for SASL plain-text auth
  ([#648](https://github.com/Shopify/sarama/pull/648)).

Improvements:
- Simplified broker locking scheme slightly
  ([#604](https://github.com/Shopify/sarama/pull/604)).
- Documentation cleanup
  ([#605](https://github.com/Shopify/sarama/pull/605),
  [#621](https://github.com/Shopify/sarama/pull/621),
  [#654](https://github.com/Shopify/sarama/pull/654)).

Bug Fixes:
- Fix race condition shutting down the OffsetManager
  ([#658](https://github.com/Shopify/sarama/pull/658)).

#### Version 1.8.0 (2016-02-01)

New Features:
- Full support for Kafka 0.9:
  - All protocol messages and fields
    ([#586](https://github.com/Shopify/sarama/pull/586),
    [#588](https://github.com/Shopify/sarama/pull/588),
    [#590](https://github.com/Shopify/sarama/pull/590)).
  - Verified that TLS support works
    ([#581](https://github.com/Shopify/sarama/pull/581)).
  - Fixed the OffsetManager compatibility
    ([#585](https://github.com/Shopify/sarama/pull/585)).

Improvements:
- Optimize for fewer system calls when reading from the network
  ([#584](https://github.com/Shopify/sarama/pull/584)).
- Automatically retry `InvalidMessage` errors to match upstream behaviour
  ([#589](https://github.com/Shopify/sarama/pull/589)).

#### Version 1.7.0 (2015-12-11)

New Features:
- Preliminary support for Kafka 0.9
  ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
  caveats:
  - Protocol-layer support is mostly in place
    ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9
    renamed some messages and fields, which we did not in order to preserve API
    compatibility.
  - The producer and consumer work against 0.9, but the offset manager does
    not ([#573](https://github.com/Shopify/sarama/pull/573)).
  - TLS support may or may not work
    ([#581](https://github.com/Shopify/sarama/pull/581)).

Improvements:
- Don't wait for request timeouts on dead brokers, greatly speeding recovery
  when the TCP connection is left hanging
  ([#548](https://github.com/Shopify/sarama/pull/548)).
- Refactored part of the producer. The new version provides a much more elegant
  solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
  slightly more efficient, and much more precise in calculating batch sizes
  when compression is used
  ([#549](https://github.com/Shopify/sarama/pull/549),
  [#550](https://github.com/Shopify/sarama/pull/550),
  [#551](https://github.com/Shopify/sarama/pull/551)).

Bug Fixes:
- Fix race condition in consumer test mock
  ([#553](https://github.com/Shopify/sarama/pull/553)).

#### Version 1.6.1 (2015-09-25)

Bug Fixes:
- Fix panic that could occur if a user-supplied message value failed to encode
  ([#449](https://github.com/Shopify/sarama/pull/449)).

#### Version 1.6.0 (2015-09-04)

New Features:
- Implementation of a consumer offset manager using the APIs introduced in
  Kafka 0.8.2. The API is designed mainly for integration into a future
  high-level consumer, not for direct use, although it is *possible* to use it
  directly.
  ([#461](https://github.com/Shopify/sarama/pull/461)).

Improvements:
- CRC32 calculation is much faster on machines with SSE4.2 instructions,
  removing a major hotspot from most profiles
  ([#255](https://github.com/Shopify/sarama/pull/255)).

Bug Fixes:
- Make protocol decoding more robust against some malformed packets generated
  by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
  [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
  ([#528](https://github.com/Shopify/sarama/pull/528)).
- Fix a potential race condition panic in the consumer on shutdown
  ([#529](https://github.com/Shopify/sarama/pull/529)).

#### Version 1.5.0 (2015-08-17)

New Features:
- TLS-encrypted network connections are now supported. This feature is subject
  to change when Kafka releases built-in TLS support, but for now this is
  enough to work with TLS-terminating proxies
  ([#154](https://github.com/Shopify/sarama/pull/154)).

Improvements:
- The consumer will not block if a single partition is not drained by the user;
  all other partitions will continue to consume normally
  ([#485](https://github.com/Shopify/sarama/pull/485)).
- Formatting of error strings has been much improved
  ([#495](https://github.com/Shopify/sarama/pull/495)).
- Internal refactoring of the producer for code cleanliness and to enable
  future work ([#300](https://github.com/Shopify/sarama/pull/300)).

Bug Fixes:
- Fix a potential deadlock in the consumer on shutdown
  ([#475](https://github.com/Shopify/sarama/pull/475)).

#### Version 1.4.3 (2015-07-21)

Bug Fixes:
- Don't include the partitioner in the producer's "fetch partitions"
  circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
- Don't retry messages until the broker is closed when abandoning a broker in
  the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
- Update the import path for snappy-go, it has moved again and the API has
  changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).

#### Version 1.4.2 (2015-05-27)

Bug Fixes:
- Update the import path for snappy-go, it has moved from google code to github
  ([#456](https://github.com/Shopify/sarama/pull/456)).

#### Version 1.4.1 (2015-05-25)

Improvements:
- Optimizations when decoding snappy messages, thanks to John Potocny
  ([#446](https://github.com/Shopify/sarama/pull/446)).

Bug Fixes:
- Fix hypothetical race conditions on producer shutdown
  ([#450](https://github.com/Shopify/sarama/pull/450),
  [#451](https://github.com/Shopify/sarama/pull/451)).

#### Version 1.4.0 (2015-05-01)

New Features:
- The consumer now implements `Topics()` and `Partitions()` methods to enable
  users to dynamically choose what topics/partitions to consume without
  instantiating a full client
  ([#431](https://github.com/Shopify/sarama/pull/431)).
- The partition-consumer now exposes the high water mark offset value returned
  by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)).
- Added a `kafka-console-consumer` tool capable of handling multiple
  partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
  ([#439](https://github.com/Shopify/sarama/pull/439),
  [#442](https://github.com/Shopify/sarama/pull/442)).

Improvements:
- The producer's logging during retry scenarios is more consistent, more
  useful, and slightly less verbose
  ([#429](https://github.com/Shopify/sarama/pull/429)).
- The client now shuffles its initial list of seed brokers in order to prevent
  thundering herd on the first broker in the list
  ([#441](https://github.com/Shopify/sarama/pull/441)).

Bug Fixes:
- The producer now correctly manages its state if retries occur when it is
  shutting down, fixing several instances of confusing behaviour and at least
  one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
- The consumer now handles messages for different partitions asynchronously,
  making it much more resilient to specific user code ordering
  ([#325](https://github.com/Shopify/sarama/pull/325)).

#### Version 1.3.0 (2015-04-16)

New Features:
- The client now tracks consumer group coordinators using
  ConsumerMetadataRequests similar to how it tracks partition leadership using
  regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
  This adds two methods to the client API:
  - `Coordinator(consumerGroup string) (*Broker, error)`
  - `RefreshCoordinator(consumerGroup string) error`

Improvements:
- ConsumerMetadataResponses now automatically create a Broker object out of the
  ID/address/port combination for the Coordinator; accessing the fields
  individually has been deprecated
  ([#413](https://github.com/Shopify/sarama/pull/413)).
- Much improved handling of `OffsetOutOfRange` errors in the consumer.
  Consumers will fail to start if the provided offset is out of range
  ([#418](https://github.com/Shopify/sarama/pull/418))
  and they will automatically shut down if the offset falls out of range
  ([#424](https://github.com/Shopify/sarama/pull/424)).
- Small performance improvement in encoding and decoding protocol messages
  ([#427](https://github.com/Shopify/sarama/pull/427)).

Bug Fixes:
- Fix a rare race condition in the client's background metadata refresher if
  it happens to be activated while the client is being closed
  ([#422](https://github.com/Shopify/sarama/pull/422)).

#### Version 1.2.0 (2015-04-07)

Improvements:
- The producer's behaviour when `Flush.Frequency` is set is now more intuitive
  ([#389](https://github.com/Shopify/sarama/pull/389)).
- The producer is now somewhat more memory-efficient during and after retrying
  messages due to an improved queue implementation
  ([#396](https://github.com/Shopify/sarama/pull/396)).
- The consumer produces much more useful logging output when leadership
  changes ([#385](https://github.com/Shopify/sarama/pull/385)).
- The client's `GetOffset` method will now automatically refresh metadata and
  retry once in the event of stale information or similar
  ([#394](https://github.com/Shopify/sarama/pull/394)).
- Broker connections now have support for using TCP keepalives
  ([#407](https://github.com/Shopify/sarama/issues/407)).

Bug Fixes:
- The OffsetCommitRequest message now correctly implements all three possible
  API versions ([#390](https://github.com/Shopify/sarama/pull/390),
  [#400](https://github.com/Shopify/sarama/pull/400)).

#### Version 1.1.0 (2015-03-20)

Improvements:
- Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
  broken topics don't choke throughput
  ([#373](https://github.com/Shopify/sarama/pull/373)).

Bug Fixes:
- Fix the producer's internal reference counting in certain unusual scenarios
  ([#367](https://github.com/Shopify/sarama/pull/367)).
- Fix the consumer's internal reference counting in certain unusual scenarios
  ([#369](https://github.com/Shopify/sarama/pull/369)).
- Fix a condition where the producer's internal control messages could have
  gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
- Fix an issue where invalid partition lists would be cached when asking for
  metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).

#### Version 1.0.0 (2015-03-17)

Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:

- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
- All the configuration values have been unified in the `Config` struct.
- Much improved test suite.

vendor/github.com/Shopify/sarama/MIT-LICENSE (generated, vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
Copyright (c) 2013 Evan Huus

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/Shopify/sarama/Makefile (generated, vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
default: fmt vet errcheck test

test:
	go test -v -timeout 60s -race ./...

vet:
	go vet ./...

errcheck:
	errcheck github.com/Shopify/sarama/...

fmt:
	@if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi

install_dependencies: install_errcheck get

install_errcheck:
	go get github.com/kisielk/errcheck

get:
	go get -t

vendor/github.com/Shopify/sarama/README.md (generated, vendored, new file, 36 lines)
@@ -0,0 +1,36 @@
sarama
======

[GoDoc](https://godoc.org/github.com/Shopify/sarama)
[Build Status](https://travis-ci.org/Shopify/sarama)

Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).

### Getting started

- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
- Mocks for testing are available in the [mocks](./mocks) subpackage.
- The [examples](./examples) directory contains more elaborate example applications.
- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
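
For a quick orientation, a minimal partition-consumer sketch (assuming a broker at `localhost:9092` and a topic named `my_topic`):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// A nil config uses sarama's defaults.
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		log.Fatalln(err)
	}
	defer consumer.Close()

	// Consume partition 0 of "my_topic" from the oldest retained offset.
	pc, err := consumer.ConsumePartition("my_topic", 0, sarama.OffsetOldest)
	if err != nil {
		log.Fatalln(err)
	}
	defer pc.Close()

	for msg := range pc.Messages() {
		log.Printf("offset %d: %s", msg.Offset, msg.Value)
	}
}
```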

### Compatibility and API stability

Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
the two latest stable releases of Kafka and Go, and we provide a two month
grace period for older releases. This means we currently officially support
Go 1.7 and 1.6, and Kafka 0.10.0 and 0.9.0, although older releases are
still likely to work.

Sarama follows semantic versioning and provides API stability via the gopkg.in service.
You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
A changelog is available [here](CHANGELOG.md).

### Contributing

* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/CONTRIBUTING.md).
* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more
  technical and design details.
* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
  contains a wealth of useful information.
* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
* If you have any questions, just ask!

vendor/github.com/Shopify/sarama/Vagrantfile (generated, vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
MEMORY = 3072

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "ubuntu/trusty64"

  config.vm.provision :shell, path: "vagrant/provision.sh"

  config.vm.network "private_network", ip: "192.168.100.67"

  config.vm.provider "virtualbox" do |v|
    v.memory = MEMORY
  end
end

vendor/github.com/Shopify/sarama/api_versions_request.go (generated, vendored, new file, 24 lines)
@@ -0,0 +1,24 @@
package sarama

type ApiVersionsRequest struct {
}

func (r *ApiVersionsRequest) encode(pe packetEncoder) error {
	return nil
}

func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
	return nil
}

func (r *ApiVersionsRequest) key() int16 {
	return 18
}

func (r *ApiVersionsRequest) version() int16 {
	return 0
}

func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
	return V0_10_0_0
}

vendor/github.com/Shopify/sarama/api_versions_response.go (generated, vendored, new file, 86 lines)
@@ -0,0 +1,86 @@
package sarama

type ApiVersionsResponseBlock struct {
	ApiKey     int16
	MinVersion int16
	MaxVersion int16
}

func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
	pe.putInt16(b.ApiKey)
	pe.putInt16(b.MinVersion)
	pe.putInt16(b.MaxVersion)
	return nil
}

func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
	var err error

	if b.ApiKey, err = pd.getInt16(); err != nil {
		return err
	}

	if b.MinVersion, err = pd.getInt16(); err != nil {
		return err
	}

	if b.MaxVersion, err = pd.getInt16(); err != nil {
		return err
	}

	return nil
}

type ApiVersionsResponse struct {
	Err         KError
	ApiVersions []*ApiVersionsResponseBlock
}

func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
	pe.putInt16(int16(r.Err))
	if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
		return err
	}
	for _, apiVersion := range r.ApiVersions {
		if err := apiVersion.encode(pe); err != nil {
			return err
		}
	}
	return nil
}

func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
	if kerr, err := pd.getInt16(); err != nil {
		return err
	} else {
		r.Err = KError(kerr)
	}

	numBlocks, err := pd.getArrayLength()
	if err != nil {
		return err
	}

	r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
	for i := 0; i < numBlocks; i++ {
		block := new(ApiVersionsResponseBlock)
		if err := block.decode(pd); err != nil {
			return err
		}
		r.ApiVersions[i] = block
	}

	return nil
}

func (r *ApiVersionsResponse) key() int16 {
	return 18
}

func (r *ApiVersionsResponse) version() int16 {
	return 0
}

func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
	return V0_10_0_0
}

vendor/github.com/Shopify/sarama/async_producer.go (generated, vendored, new file, 905 lines)
@@ -0,0 +1,905 @@
package sarama

import (
	"fmt"
	"sync"
	"time"

	"github.com/eapache/go-resiliency/breaker"
	"github.com/eapache/queue"
)

// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
// and parses responses for errors. You must read from the Errors() channel or the
// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
// leaks: it will not be garbage-collected automatically when it passes out of
// scope.
type AsyncProducer interface {

	// AsyncClose triggers a shutdown of the producer, flushing any messages it may
	// have buffered. The shutdown has completed when both the Errors and Successes
	// channels have been closed. When calling AsyncClose, you *must* continue to
	// read from those channels in order to drain the results of any messages in
	// flight.
	AsyncClose()

	// Close shuts down the producer and flushes any messages it may have buffered.
	// You must call this function before a producer object passes out of scope, as
	// it may otherwise leak memory. You must call this before calling Close on the
	// underlying client.
	Close() error

	// Input is the input channel for the user to write messages to that they
	// wish to send.
	Input() chan<- *ProducerMessage

	// Successes is the success output channel back to the user when AckSuccesses is
	// enabled. If Return.Successes is true, you MUST read from this channel or the
	// Producer will deadlock. It is suggested that you send and read messages
	// together in a single select statement.
	Successes() <-chan *ProducerMessage

	// Errors is the error output channel back to the user. You MUST read from this
	// channel or the Producer will deadlock when the channel is full. Alternatively,
	// you can set Producer.Return.Errors in your config to false, which prevents
	// errors from being returned.
	Errors() <-chan *ProducerError
}
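
// Example (illustrative sketch, assuming a broker at localhost:9092 and
// Producer.Return.Successes / Producer.Return.Errors enabled): sending and
// reading results in a single select, as the Successes comment suggests.
//
//	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, config)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	for {
//		select {
//		case producer.Input() <- &sarama.ProducerMessage{Topic: "t", Value: sarama.StringEncoder("hello")}:
//		case msg := <-producer.Successes():
//			log.Printf("delivered partition=%d offset=%d", msg.Partition, msg.Offset)
//		case err := <-producer.Errors():
//			log.Println("delivery failed:", err)
//		}
//	}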

type asyncProducer struct {
	client    Client
	conf      *Config
	ownClient bool

	errors                    chan *ProducerError
	input, successes, retries chan *ProducerMessage
	inFlight                  sync.WaitGroup

	brokers    map[*Broker]chan<- *ProducerMessage
	brokerRefs map[chan<- *ProducerMessage]int
	brokerLock sync.Mutex
}

// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
	client, err := NewClient(addrs, conf)
	if err != nil {
		return nil, err
	}

	p, err := NewAsyncProducerFromClient(client)
	if err != nil {
		return nil, err
	}
	p.(*asyncProducer).ownClient = true
	return p, nil
}

// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this producer.
func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
	// Check that we are not dealing with a closed Client before processing any other arguments
	if client.Closed() {
		return nil, ErrClosedClient
	}

	p := &asyncProducer{
		client:     client,
		conf:       client.Config(),
		errors:     make(chan *ProducerError),
		input:      make(chan *ProducerMessage),
		successes:  make(chan *ProducerMessage),
		retries:    make(chan *ProducerMessage),
		brokers:    make(map[*Broker]chan<- *ProducerMessage),
		brokerRefs: make(map[chan<- *ProducerMessage]int),
	}

	// launch our singleton dispatchers
	go withRecover(p.dispatcher)
	go withRecover(p.retryHandler)

	return p, nil
}

type flagSet int8

const (
	syn      flagSet = 1 << iota // first message from partitionProducer to brokerProducer
	fin                          // final message from partitionProducer to brokerProducer and back
	shutdown                     // start the shutdown process
)

// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
type ProducerMessage struct {
	Topic string // The Kafka topic for this message.
	// The partitioning key for this message. Pre-existing Encoders include
	// StringEncoder and ByteEncoder.
	Key Encoder
	// The actual message to store in Kafka. Pre-existing Encoders include
	// StringEncoder and ByteEncoder.
	Value Encoder

	// This field is used to hold arbitrary data you wish to include so it
	// will be available when receiving on the Successes and Errors channels.
	// Sarama completely ignores this field and it is only to be used for
	// pass-through data.
	Metadata interface{}

	// Below this point are filled in by the producer as the message is processed

	// Offset is the offset of the message stored on the broker. This is only
	// guaranteed to be defined if the message was successfully delivered and
	// RequiredAcks is not NoResponse.
	Offset int64
	// Partition is the partition that the message was sent to. This is only
	// guaranteed to be defined if the message was successfully delivered.
	Partition int32
	// Timestamp is the timestamp assigned to the message by the broker. This
	// is only guaranteed to be defined if the message was successfully
	// delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
	// least version 0.10.0.
	Timestamp time.Time

	retries int
	flags   flagSet
}
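
// Example (illustrative sketch): building a message with the pre-existing
// encoders mentioned above; "events" is an assumed topic name.
//
//	msg := &sarama.ProducerMessage{
//		Topic: "events",
//		Key:   sarama.StringEncoder("user-42"),
//		Value: sarama.ByteEncoder([]byte("payload")),
//	}
//	producer.Input() <- msg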
|
||||
const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
|
||||
|
||||
func (m *ProducerMessage) byteSize() int {
|
||||
size := producerMessageOverhead
|
||||
if m.Key != nil {
|
||||
size += m.Key.Length()
|
||||
}
|
||||
if m.Value != nil {
|
||||
size += m.Value.Length()
|
||||
}
|
||||
return size
|
||||
}
|
||||
|
||||
func (m *ProducerMessage) clear() {
|
||||
m.flags = 0
|
||||
m.retries = 0
|
||||
}
|
||||
|
||||
// ProducerError is the type of error generated when the producer fails to deliver a message.
|
||||
// It contains the original ProducerMessage as well as the actual error value.
|
||||
type ProducerError struct {
|
||||
Msg *ProducerMessage
|
||||
Err error
|
||||
}
|
||||
|
||||
func (pe ProducerError) Error() string {
|
||||
return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
|
||||
}
|
||||
|
||||
// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
|
||||
// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
|
||||
// when closing a producer.
|
||||
type ProducerErrors []*ProducerError
|
||||
|
||||
func (pe ProducerErrors) Error() string {
|
||||
return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
|
||||
}
|
||||
|
||||
func (p *asyncProducer) Errors() <-chan *ProducerError {
|
||||
return p.errors
|
||||
}
|
||||
|
||||
func (p *asyncProducer) Successes() <-chan *ProducerMessage {
|
||||
return p.successes
|
||||
}
|
||||
|
||||
func (p *asyncProducer) Input() chan<- *ProducerMessage {
|
||||
return p.input
|
||||
}
|
||||
|
||||
func (p *asyncProducer) Close() error {
|
||||
p.AsyncClose()
|
||||
|
||||
if p.conf.Producer.Return.Successes {
|
||||
go withRecover(func() {
|
||||
for _ = range p.successes {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var errors ProducerErrors
|
||||
if p.conf.Producer.Return.Errors {
|
||||
for event := range p.errors {
|
||||
errors = append(errors, event)
|
||||
}
|
||||
} else {
|
||||
<-p.errors
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return errors
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *asyncProducer) AsyncClose() {
|
||||
go withRecover(p.shutdown)
|
||||
}
|
||||
|
||||
// singleton
|
||||
// dispatches messages by topic
|
||||
func (p *asyncProducer) dispatcher() {
|
||||
handlers := make(map[string]chan<- *ProducerMessage)
|
||||
shuttingDown := false
|
||||
|
||||
for msg := range p.input {
|
||||
if msg == nil {
|
||||
Logger.Println("Something tried to send a nil message, it was ignored.")
|
||||
continue
|
||||
}
|
||||
|
||||
if msg.flags&shutdown != 0 {
|
||||
shuttingDown = true
|
||||
p.inFlight.Done()
|
||||
continue
|
||||
} else if msg.retries == 0 {
|
||||
if shuttingDown {
|
||||
// we can't just call returnError here because that decrements the wait group,
|
||||
// which hasn't been incremented yet for this message, and shouldn't be
|
||||
pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
|
||||
if p.conf.Producer.Return.Errors {
|
||||
p.errors <- pErr
|
||||
} else {
|
||||
Logger.Println(pErr)
|
||||
}
|
||||
continue
|
||||
}
|
||||
p.inFlight.Add(1)
|
||||
}
|
||||
|
||||
if msg.byteSize() > p.conf.Producer.MaxMessageBytes {
|
||||
p.returnError(msg, ErrMessageSizeTooLarge)
|
||||
continue
|
||||
}
|
||||
|
||||
handler := handlers[msg.Topic]
|
||||
if handler == nil {
|
||||
handler = p.newTopicProducer(msg.Topic)
|
||||
handlers[msg.Topic] = handler
|
||||
}
|
||||
|
||||
handler <- msg
|
||||
}
|
||||
|
||||
for _, handler := range handlers {
|
||||
close(handler)
|
||||
}
|
||||
}
|
||||
|
||||
// one per topic
|
||||
// partitions messages, then dispatches them by partition
|
||||
type topicProducer struct {
|
||||
parent *asyncProducer
|
||||
topic string
|
||||
input <-chan *ProducerMessage
|
||||
|
||||
breaker *breaker.Breaker
|
||||
handlers map[int32]chan<- *ProducerMessage
|
||||
partitioner Partitioner
|
||||
}
|
||||
|
||||
func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
|
||||
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
|
||||
tp := &topicProducer{
|
||||
parent: p,
|
||||
topic: topic,
|
||||
input: input,
|
||||
breaker: breaker.New(3, 1, 10*time.Second),
|
||||
handlers: make(map[int32]chan<- *ProducerMessage),
|
||||
partitioner: p.conf.Producer.Partitioner(topic),
|
||||
}
|
||||
go withRecover(tp.dispatch)
|
||||
return input
|
||||
}
|
||||
|
||||
func (tp *topicProducer) dispatch() {
|
||||
for msg := range tp.input {
|
||||
if msg.retries == 0 {
|
||||
if err := tp.partitionMessage(msg); err != nil {
|
||||
tp.parent.returnError(msg, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
handler := tp.handlers[msg.Partition]
|
||||
if handler == nil {
|
||||
handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
|
||||
tp.handlers[msg.Partition] = handler
|
||||
}
|
||||
|
||||
handler <- msg
|
||||
}
|
||||
|
||||
for _, handler := range tp.handlers {
|
||||
close(handler)
|
||||
}
|
||||
}
|
||||
|
||||
func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
|
||||
var partitions []int32
|
||||
|
||||
err := tp.breaker.Run(func() (err error) {
|
||||
if tp.partitioner.RequiresConsistency() {
|
||||
partitions, err = tp.parent.client.Partitions(msg.Topic)
|
||||
} else {
|
||||
partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
|
||||
}
|
||||
return
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
numPartitions := int32(len(partitions))
|
||||
|
||||
if numPartitions == 0 {
|
||||
return ErrLeaderNotAvailable
|
||||
}
|
||||
|
||||
choice, err := tp.partitioner.Partition(msg, numPartitions)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
} else if choice < 0 || choice >= numPartitions {
|
||||
return ErrInvalidPartition
|
||||
}
|
||||
|
||||
msg.Partition = partitions[choice]
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// one per partition per topic
// dispatches messages to the appropriate broker
// also responsible for maintaining message order during retries
type partitionProducer struct {
	parent    *asyncProducer
	topic     string
	partition int32
	input     <-chan *ProducerMessage

	leader  *Broker
	breaker *breaker.Breaker
	output  chan<- *ProducerMessage

	// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
	// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
	// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
	// therefore whether our buffer is complete and safe to flush)
	highWatermark int
	retryState    []partitionRetryState
}

type partitionRetryState struct {
	buf          []*ProducerMessage
	expectChaser bool
}

func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
	pp := &partitionProducer{
		parent:    p,
		topic:     topic,
		partition: partition,
		input:     input,

		breaker:    breaker.New(3, 1, 10*time.Second),
		retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
	}
	go withRecover(pp.dispatch)
	return input
}

func (pp *partitionProducer) dispatch() {
	// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
	// on the first message
	pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
	if pp.leader != nil {
		pp.output = pp.parent.getBrokerProducer(pp.leader)
		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
		pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
	}

	for msg := range pp.input {
		if msg.retries > pp.highWatermark {
			// a new, higher, retry level; handle it and then back off
			pp.newHighWatermark(msg.retries)
			time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
		} else if pp.highWatermark > 0 {
			// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
			if msg.retries < pp.highWatermark {
				// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
				if msg.flags&fin == fin {
					pp.retryState[msg.retries].expectChaser = false
					pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
				} else {
					pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
				}
				continue
			} else if msg.flags&fin == fin {
				// this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
				// meaning this retry level is done and we can go down (at least) one level and flush that
				pp.retryState[pp.highWatermark].expectChaser = false
				pp.flushRetryBuffers()
				pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
				continue
			}
		}

		// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
		// without breaking any of our ordering guarantees

		if pp.output == nil {
			if err := pp.updateLeader(); err != nil {
				pp.parent.returnError(msg, err)
				time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
				continue
			}
			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
		}

		pp.output <- msg
	}

	if pp.output != nil {
		pp.parent.unrefBrokerProducer(pp.leader, pp.output)
	}
}

func (pp *partitionProducer) newHighWatermark(hwm int) {
	Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
	pp.highWatermark = hwm

	// send off a fin so that we know when everything "in between" has made it
	// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
	pp.retryState[pp.highWatermark].expectChaser = true
	pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
	pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}

	// a new HWM means that our current broker selection is out of date
	Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
	pp.parent.unrefBrokerProducer(pp.leader, pp.output)
	pp.output = nil
}

func (pp *partitionProducer) flushRetryBuffers() {
	Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
	for {
		pp.highWatermark--

		if pp.output == nil {
			if err := pp.updateLeader(); err != nil {
				pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
				goto flushDone
			}
			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
		}

		for _, msg := range pp.retryState[pp.highWatermark].buf {
			pp.output <- msg
		}

	flushDone:
		pp.retryState[pp.highWatermark].buf = nil
		if pp.retryState[pp.highWatermark].expectChaser {
			Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
			break
		} else if pp.highWatermark == 0 {
			Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
			break
		}
	}
}

func (pp *partitionProducer) updateLeader() error {
	return pp.breaker.Run(func() (err error) {
		if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
			return err
		}

		if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
			return err
		}

		pp.output = pp.parent.getBrokerProducer(pp.leader)
		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
		pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}

		return nil
	})
}

// one per broker; also constructs an associated flusher
func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
	var (
		input     = make(chan *ProducerMessage)
		bridge    = make(chan *produceSet)
		responses = make(chan *brokerProducerResponse)
	)

	bp := &brokerProducer{
		parent:         p,
		broker:         broker,
		input:          input,
		output:         bridge,
		responses:      responses,
		buffer:         newProduceSet(p),
		currentRetries: make(map[string]map[int32]error),
	}
	go withRecover(bp.run)

	// minimal bridge to make the network response `select`able
	go withRecover(func() {
		for set := range bridge {
			request := set.buildRequest()

			response, err := broker.Produce(request)

			responses <- &brokerProducerResponse{
				set: set,
				err: err,
				res: response,
			}
		}
		close(responses)
	})

	return input
}

type brokerProducerResponse struct {
	set *produceSet
	err error
	res *ProduceResponse
}

// groups messages together into appropriately-sized batches for sending to the broker
// handles state related to retries etc
type brokerProducer struct {
	parent *asyncProducer
	broker *Broker

	input     <-chan *ProducerMessage
	output    chan<- *produceSet
	responses <-chan *brokerProducerResponse

	buffer     *produceSet
	timer      <-chan time.Time
	timerFired bool

	closing        error
	currentRetries map[string]map[int32]error
}

func (bp *brokerProducer) run() {
	var output chan<- *produceSet
	Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())

	for {
		select {
		case msg := <-bp.input:
			if msg == nil {
				bp.shutdown()
				return
			}

			if msg.flags&syn == syn {
				Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
					bp.broker.ID(), msg.Topic, msg.Partition)
				if bp.currentRetries[msg.Topic] == nil {
					bp.currentRetries[msg.Topic] = make(map[int32]error)
				}
				bp.currentRetries[msg.Topic][msg.Partition] = nil
				bp.parent.inFlight.Done()
				continue
			}

			if reason := bp.needsRetry(msg); reason != nil {
				bp.parent.retryMessage(msg, reason)

				if bp.closing == nil && msg.flags&fin == fin {
					// we were retrying this partition but we can start processing again
					delete(bp.currentRetries[msg.Topic], msg.Partition)
					Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
						bp.broker.ID(), msg.Topic, msg.Partition)
				}

				continue
			}

			if bp.buffer.wouldOverflow(msg) {
				if err := bp.waitForSpace(msg); err != nil {
					bp.parent.retryMessage(msg, err)
					continue
				}
			}

			if err := bp.buffer.add(msg); err != nil {
				bp.parent.returnError(msg, err)
				continue
			}

			if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
				bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
			}
		case <-bp.timer:
			bp.timerFired = true
		case output <- bp.buffer:
			bp.rollOver()
		case response := <-bp.responses:
			bp.handleResponse(response)
		}

		if bp.timerFired || bp.buffer.readyToFlush() {
			output = bp.output
		} else {
			output = nil
		}
	}
}

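// Illustrative sketch, not part of upstream sarama: the loop above only
// enables the `output` send case once the flush timer has fired or the buffer
// reports readyToFlush(), so batch size and latency are tuned entirely
// through the producer config. The values below are hypothetical examples.
//
//	conf := NewConfig()
//	conf.Producer.Flush.Frequency = 500 * time.Millisecond // flush at least twice a second
//	conf.Producer.Flush.Messages = 64                      // ...or once 64 messages are buffered
//	conf.Producer.Flush.MaxMessages = 256                  // hard cap on messages per request
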
func (bp *brokerProducer) shutdown() {
	for !bp.buffer.empty() {
		select {
		case response := <-bp.responses:
			bp.handleResponse(response)
		case bp.output <- bp.buffer:
			bp.rollOver()
		}
	}
	close(bp.output)
	for response := range bp.responses {
		bp.handleResponse(response)
	}

	Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
}

func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
	if bp.closing != nil {
		return bp.closing
	}

	return bp.currentRetries[msg.Topic][msg.Partition]
}

func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
	Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())

	for {
		select {
		case response := <-bp.responses:
			bp.handleResponse(response)
			// handling a response can change our state, so re-check some things
			if reason := bp.needsRetry(msg); reason != nil {
				return reason
			} else if !bp.buffer.wouldOverflow(msg) {
				return nil
			}
		case bp.output <- bp.buffer:
			bp.rollOver()
			return nil
		}
	}
}

func (bp *brokerProducer) rollOver() {
	bp.timer = nil
	bp.timerFired = false
	bp.buffer = newProduceSet(bp.parent)
}

func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
	if response.err != nil {
		bp.handleError(response.set, response.err)
	} else {
		bp.handleSuccess(response.set, response.res)
	}

	if bp.buffer.empty() {
		bp.rollOver() // this can happen if the response invalidated our buffer
	}
}

func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
	// we iterate through the blocks in the request set, not the response, so that we notice
	// if the response is missing a block completely
	sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
		if response == nil {
			// this only happens when RequiredAcks is NoResponse, so we have to assume success
			bp.parent.returnSuccesses(msgs)
			return
		}

		block := response.GetBlock(topic, partition)
		if block == nil {
			bp.parent.returnErrors(msgs, ErrIncompleteResponse)
			return
		}

		switch block.Err {
		// Success
		case ErrNoError:
			if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
				for _, msg := range msgs {
					msg.Timestamp = block.Timestamp
				}
			}
			for i, msg := range msgs {
				msg.Offset = block.Offset + int64(i)
			}
			bp.parent.returnSuccesses(msgs)
		// Retriable errors
		case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
			ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
			Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
				bp.broker.ID(), topic, partition, block.Err)
			bp.currentRetries[topic][partition] = block.Err
			bp.parent.retryMessages(msgs, block.Err)
			bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
		// Other non-retriable errors
		default:
			bp.parent.returnErrors(msgs, block.Err)
		}
	})
}

func (bp *brokerProducer) handleError(sent *produceSet, err error) {
	switch err.(type) {
	case PacketEncodingError:
		sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
			bp.parent.returnErrors(msgs, err)
		})
	default:
		Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
		bp.parent.abandonBrokerConnection(bp.broker)
		_ = bp.broker.Close()
		bp.closing = err
		sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
			bp.parent.retryMessages(msgs, err)
		})
		bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
			bp.parent.retryMessages(msgs, err)
		})
		bp.rollOver()
	}
}

// singleton
// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
func (p *asyncProducer) retryHandler() {
	var msg *ProducerMessage
	buf := queue.New()

	for {
		if buf.Length() == 0 {
			msg = <-p.retries
		} else {
			select {
			case msg = <-p.retries:
			case p.input <- buf.Peek().(*ProducerMessage):
				buf.Remove()
				continue
			}
		}

		if msg == nil {
			return
		}

		buf.Add(msg)
	}
}

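// Illustrative sketch, not part of upstream sarama: the unbounded-buffer
// bridge pattern used by retryHandler above, reduced to its essentials. It
// drains `in` without ever blocking the sender, while feeding `out` only as
// fast as the receiver allows, which is what breaks the retry deadlock.
// Channel and function names are hypothetical.
//
//	func bridge(in <-chan *ProducerMessage, out chan<- *ProducerMessage) {
//		buf := queue.New() // github.com/eapache/queue
//		for {
//			if buf.Length() == 0 {
//				m, ok := <-in
//				if !ok {
//					return
//				}
//				buf.Add(m)
//				continue
//			}
//			select {
//			case m, ok := <-in:
//				if !ok {
//					return
//				}
//				buf.Add(m)
//			case out <- buf.Peek().(*ProducerMessage):
//				buf.Remove()
//			}
//		}
//	}
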
// utility functions

func (p *asyncProducer) shutdown() {
	Logger.Println("Producer shutting down.")
	p.inFlight.Add(1)
	p.input <- &ProducerMessage{flags: shutdown}

	p.inFlight.Wait()

	if p.ownClient {
		err := p.client.Close()
		if err != nil {
			Logger.Println("producer/shutdown failed to close the embedded client:", err)
		}
	}

	close(p.input)
	close(p.retries)
	close(p.errors)
	close(p.successes)
}

func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
	msg.clear()
	pErr := &ProducerError{Msg: msg, Err: err}
	if p.conf.Producer.Return.Errors {
		p.errors <- pErr
	} else {
		Logger.Println(pErr)
	}
	p.inFlight.Done()
}

func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
	for _, msg := range batch {
		p.returnError(msg, err)
	}
}

func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
	for _, msg := range batch {
		if p.conf.Producer.Return.Successes {
			msg.clear()
			p.successes <- msg
		}
		p.inFlight.Done()
	}
}

func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
	if msg.retries >= p.conf.Producer.Retry.Max {
		p.returnError(msg, err)
	} else {
		msg.retries++
		p.retries <- msg
	}
}

func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
	for _, msg := range batch {
		p.retryMessage(msg, err)
	}
}

func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
	p.brokerLock.Lock()
	defer p.brokerLock.Unlock()

	bp := p.brokers[broker]

	if bp == nil {
		bp = p.newBrokerProducer(broker)
		p.brokers[broker] = bp
		p.brokerRefs[bp] = 0
	}

	p.brokerRefs[bp]++

	return bp
}

func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
	p.brokerLock.Lock()
	defer p.brokerLock.Unlock()

	p.brokerRefs[bp]--
	if p.brokerRefs[bp] == 0 {
		close(bp)
		delete(p.brokerRefs, bp)

		if p.brokers[broker] == bp {
			delete(p.brokers, broker)
		}
	}
}

func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
	p.brokerLock.Lock()
	defer p.brokerLock.Unlock()

	delete(p.brokers, broker)
}
674 vendor/github.com/Shopify/sarama/broker.go generated vendored Normal file

@@ -0,0 +1,674 @@
package sarama

import (
	"crypto/tls"
	"encoding/binary"
	"fmt"
	"io"
	"net"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"github.com/rcrowley/go-metrics"
)

// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
type Broker struct {
	id   int32
	addr string

	conf          *Config
	correlationID int32
	conn          net.Conn
	connErr       error
	lock          sync.Mutex
	opened        int32

	responses chan responsePromise
	done      chan bool

	incomingByteRate       metrics.Meter
	requestRate            metrics.Meter
	requestSize            metrics.Histogram
	requestLatency         metrics.Histogram
	outgoingByteRate       metrics.Meter
	responseRate           metrics.Meter
	responseSize           metrics.Histogram
	brokerIncomingByteRate metrics.Meter
	brokerRequestRate      metrics.Meter
	brokerRequestSize      metrics.Histogram
	brokerRequestLatency   metrics.Histogram
	brokerOutgoingByteRate metrics.Meter
	brokerResponseRate     metrics.Meter
	brokerResponseSize     metrics.Histogram
}

type responsePromise struct {
	requestTime   time.Time
	correlationID int32
	packets       chan []byte
	errors        chan error
}

// NewBroker creates and returns a Broker targeting the given host:port address.
// This does not attempt to actually connect, you have to call Open() for that.
func NewBroker(addr string) *Broker {
	return &Broker{id: -1, addr: addr}
}

// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
// waiting for the connection to complete. This means that any subsequent operations on the broker will
// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
func (b *Broker) Open(conf *Config) error {
	if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
		return ErrAlreadyConnected
	}

	if conf == nil {
		conf = NewConfig()
	}

	err := conf.Validate()
	if err != nil {
		return err
	}

	b.lock.Lock()

	go withRecover(func() {
		defer b.lock.Unlock()

		dialer := net.Dialer{
			Timeout:   conf.Net.DialTimeout,
			KeepAlive: conf.Net.KeepAlive,
		}

		if conf.Net.TLS.Enable {
			b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
		} else {
			b.conn, b.connErr = dialer.Dial("tcp", b.addr)
		}
		if b.connErr != nil {
			Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
			b.conn = nil
			atomic.StoreInt32(&b.opened, 0)
			return
		}
		b.conn = newBufConn(b.conn)

		b.conf = conf

		// Create or reuse the global metrics shared between brokers
		b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
		b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
		b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
		b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
		b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
		b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
		b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
		// Do not gather metrics for seeded broker (only used during bootstrap) because they share
		// the same id (-1) and are already exposed through the global metrics above
		if b.id >= 0 {
			b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry)
			b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry)
			b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry)
			b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry)
			b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry)
			b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry)
			b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry)
		}

		if conf.Net.SASL.Enable {
			b.connErr = b.sendAndReceiveSASLPlainAuth()
			if b.connErr != nil {
				err = b.conn.Close()
				if err == nil {
					Logger.Printf("Closed connection to broker %s\n", b.addr)
				} else {
					Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
				}
				b.conn = nil
				atomic.StoreInt32(&b.opened, 0)
				return
			}
		}

		b.done = make(chan bool)
		b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)

		if b.id >= 0 {
			Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
		} else {
			Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
		}
		go withRecover(b.responseReceiver)
	})

	return nil
}

// Connected returns true if the broker is connected and false otherwise. If the broker is not
// connected but it had tried to connect, the error from that connection attempt is also returned.
func (b *Broker) Connected() (bool, error) {
	b.lock.Lock()
	defer b.lock.Unlock()

	return b.conn != nil, b.connErr
}

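// Illustrative sketch, not part of upstream sarama: because Open only kicks
// off the dial in a background goroutine, a caller that wants synchronous
// semantics pairs it with Connected(), which blocks on the same lock until
// the dial has finished. The address is hypothetical.
//
//	b := NewBroker("localhost:9092")
//	if err := b.Open(nil); err != nil { // nil config => NewConfig()
//		return err
//	}
//	ok, err := b.Connected() // blocks until the background dial completes
//	if err != nil || !ok {
//		return fmt.Errorf("broker not reachable: %v", err)
//	}
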
func (b *Broker) Close() error {
	b.lock.Lock()
	defer b.lock.Unlock()

	if b.conn == nil {
		return ErrNotConnected
	}

	close(b.responses)
	<-b.done

	err := b.conn.Close()

	b.conn = nil
	b.connErr = nil
	b.done = nil
	b.responses = nil

	if err == nil {
		Logger.Printf("Closed connection to broker %s\n", b.addr)
	} else {
		Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
	}

	atomic.StoreInt32(&b.opened, 0)

	return err
}

// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
func (b *Broker) ID() int32 {
	return b.id
}

// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
func (b *Broker) Addr() string {
	return b.addr
}

func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
	response := new(MetadataResponse)

	err := b.sendAndReceive(request, response)

	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
	response := new(ConsumerMetadataResponse)

	err := b.sendAndReceive(request, response)

	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
	response := new(OffsetResponse)

	err := b.sendAndReceive(request, response)

	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
	var response *ProduceResponse
	var err error

	if request.RequiredAcks == NoResponse {
		err = b.sendAndReceive(request, nil)
	} else {
		response = new(ProduceResponse)
		err = b.sendAndReceive(request, response)
	}

	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
	response := new(FetchResponse)

	err := b.sendAndReceive(request, response)

	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
	response := new(OffsetCommitResponse)

	err := b.sendAndReceive(request, response)

	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
	response := new(OffsetFetchResponse)

	err := b.sendAndReceive(request, response)

	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
	response := new(JoinGroupResponse)

	err := b.sendAndReceive(request, response)
	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
	response := new(SyncGroupResponse)

	err := b.sendAndReceive(request, response)
	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
	response := new(LeaveGroupResponse)

	err := b.sendAndReceive(request, response)
	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
	response := new(HeartbeatResponse)

	err := b.sendAndReceive(request, response)
	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
	response := new(ListGroupsResponse)

	err := b.sendAndReceive(request, response)
	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
	response := new(DescribeGroupsResponse)

	err := b.sendAndReceive(request, response)
	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
	b.lock.Lock()
	defer b.lock.Unlock()

	if b.conn == nil {
		if b.connErr != nil {
			return nil, b.connErr
		}
		return nil, ErrNotConnected
	}

	if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
		return nil, ErrUnsupportedVersion
	}

	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
	buf, err := encode(req, b.conf.MetricRegistry)
	if err != nil {
		return nil, err
	}

	err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
	if err != nil {
		return nil, err
	}

	requestTime := time.Now()
	bytes, err := b.conn.Write(buf)
	b.updateOutgoingCommunicationMetrics(bytes)
	if err != nil {
		return nil, err
	}
	b.correlationID++

	if !promiseResponse {
		// Record request latency without the response
		b.updateRequestLatencyMetrics(time.Since(requestTime))
		return nil, nil
	}

	promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)}
	b.responses <- promise

	return &promise, nil
}

func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
	promise, err := b.send(req, res != nil)

	if err != nil {
		return err
	}

	if promise == nil {
		return nil
	}

	select {
	case buf := <-promise.packets:
		return versionedDecode(buf, res, req.version())
	case err = <-promise.errors:
		return err
	}
}

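// Illustrative sketch, not part of upstream sarama: every exported request
// method above is a thin wrapper over sendAndReceive, so issuing a request
// against a single broker looks the same for all of them. The address and
// topic name are hypothetical.
//
//	broker := NewBroker("localhost:9092")
//	if err := broker.Open(nil); err != nil {
//		return err
//	}
//	defer broker.Close()
//	meta, err := broker.GetMetadata(&MetadataRequest{Topics: []string{"my-topic"}})
//	if err != nil {
//		return err
//	}
//	_ = meta.Brokers // brokers and partition leaders known to the cluster
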
func (b *Broker) decode(pd packetDecoder) (err error) {
	b.id, err = pd.getInt32()
	if err != nil {
		return err
	}

	host, err := pd.getString()
	if err != nil {
		return err
	}

	port, err := pd.getInt32()
	if err != nil {
		return err
	}

	b.addr = net.JoinHostPort(host, fmt.Sprint(port))
	if _, _, err := net.SplitHostPort(b.addr); err != nil {
		return err
	}

	return nil
}

func (b *Broker) encode(pe packetEncoder) (err error) {

	host, portstr, err := net.SplitHostPort(b.addr)
	if err != nil {
		return err
	}
	port, err := strconv.Atoi(portstr)
	if err != nil {
		return err
	}

	pe.putInt32(b.id)

	err = pe.putString(host)
	if err != nil {
		return err
	}

	pe.putInt32(int32(port))

	return nil
}

func (b *Broker) responseReceiver() {
	var dead error
	header := make([]byte, 8)
	for response := range b.responses {
		if dead != nil {
			response.errors <- dead
			continue
		}

		err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
		if err != nil {
			dead = err
			response.errors <- err
			continue
		}

		bytesReadHeader, err := io.ReadFull(b.conn, header)
		requestLatency := time.Since(response.requestTime)
		if err != nil {
			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
			dead = err
			response.errors <- err
			continue
		}

		decodedHeader := responseHeader{}
		err = decode(header, &decodedHeader)
		if err != nil {
			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
			dead = err
			response.errors <- err
			continue
		}
		if decodedHeader.correlationID != response.correlationID {
			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
			// TODO if decoded ID < cur ID, discard until we catch up
			// TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
			dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
			response.errors <- dead
			continue
		}

		buf := make([]byte, decodedHeader.length-4)
		bytesReadBody, err := io.ReadFull(b.conn, buf)
		b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
		if err != nil {
			dead = err
			response.errors <- err
			continue
		}

		response.packets <- buf
	}
	close(b.done)
}

func (b *Broker) sendAndReceiveSASLPlainHandshake() error {
	rb := &SaslHandshakeRequest{"PLAIN"}
	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
	buf, err := encode(req, b.conf.MetricRegistry)
	if err != nil {
		return err
	}

	err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
	if err != nil {
		return err
	}

	requestTime := time.Now()
	bytes, err := b.conn.Write(buf)
	b.updateOutgoingCommunicationMetrics(bytes)
	if err != nil {
		Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
		return err
	}
	b.correlationID++
	// wait for the response
	header := make([]byte, 8) // response header
	_, err = io.ReadFull(b.conn, header)
	if err != nil {
		Logger.Printf("Failed to read SASL handshake header: %s\n", err.Error())
		return err
	}
	length := binary.BigEndian.Uint32(header[:4])
	payload := make([]byte, length-4)
	n, err := io.ReadFull(b.conn, payload)
	if err != nil {
		Logger.Printf("Failed to read SASL handshake payload: %s\n", err.Error())
		return err
	}
	b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
	res := &SaslHandshakeResponse{}
	err = versionedDecode(payload, res, 0)
	if err != nil {
		Logger.Printf("Failed to parse SASL handshake: %s\n", err.Error())
		return err
	}
	if res.Err != ErrNoError {
		Logger.Printf("Invalid SASL Mechanism: %s\n", res.Err.Error())
		return res.Err
	}
	Logger.Print("Successful SASL handshake")
	return nil
}

// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149)
// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9
//
// In SASL Plain, Kafka expects the auth header to be in the following format
// Message format (from https://tools.ietf.org/html/rfc4616):
//
//   message   = [authzid] UTF8NUL authcid UTF8NUL passwd
//   authcid   = 1*SAFE ; MUST accept up to 255 octets
//   authzid   = 1*SAFE ; MUST accept up to 255 octets
//   passwd    = 1*SAFE ; MUST accept up to 255 octets
//   UTF8NUL   = %x00 ; UTF-8 encoded NUL character
//
//   SAFE      = UTF1 / UTF2 / UTF3 / UTF4
//                  ;; any UTF-8 encoded Unicode character except NUL
//
// When credentials are valid, Kafka returns a 4 byte array of null characters.
// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way
// of responding to bad credentials, but that's how it's being done today.
func (b *Broker) sendAndReceiveSASLPlainAuth() error {
	if b.conf.Net.SASL.Handshake {
		handshakeErr := b.sendAndReceiveSASLPlainHandshake()
		if handshakeErr != nil {
			Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
			return handshakeErr
		}
	}
	length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
	authBytes := make([]byte, length+4) // 4 byte length header + auth data
	binary.BigEndian.PutUint32(authBytes, uint32(length))
	copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))

	err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
	if err != nil {
		Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
		return err
	}

	requestTime := time.Now()
	bytesWritten, err := b.conn.Write(authBytes)
	b.updateOutgoingCommunicationMetrics(bytesWritten)
	if err != nil {
		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
		return err
	}

	header := make([]byte, 4)
	n, err := io.ReadFull(b.conn, header)
	b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
	// If the credentials are valid, we would get a 4 byte response filled with null characters.
	// Otherwise, the broker closes the connection and we get an EOF
	if err != nil {
		Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
		return err
	}

	Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
	return nil
}

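// Illustrative sketch, not part of upstream sarama: the wire layout built by
// sendAndReceiveSASLPlainAuth for user "alice", password "secret" and an
// empty authzid, per RFC 4616, prefixed by Kafka's 4-byte big-endian length
// header. The credentials are hypothetical.
//
//	length := 1 + len("alice") + 1 + len("secret") // 13
//	// authBytes layout (17 bytes total):
//	//   [0:4]   0x00 0x00 0x00 0x0d   length header (13)
//	//   [4]     0x00                  empty authzid terminator
//	//   [5:10]  "alice"               authcid
//	//   [10]    0x00                  separator
//	//   [11:17] "secret"              passwd
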
func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
	b.updateRequestLatencyMetrics(requestLatency)
	b.responseRate.Mark(1)
	if b.brokerResponseRate != nil {
		b.brokerResponseRate.Mark(1)
	}
	responseSize := int64(bytes)
	b.incomingByteRate.Mark(responseSize)
	if b.brokerIncomingByteRate != nil {
		b.brokerIncomingByteRate.Mark(responseSize)
	}
	b.responseSize.Update(responseSize)
	if b.brokerResponseSize != nil {
		b.brokerResponseSize.Update(responseSize)
	}
}

func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
	requestLatencyInMs := int64(requestLatency / time.Millisecond)
	b.requestLatency.Update(requestLatencyInMs)
	if b.brokerRequestLatency != nil {
		b.brokerRequestLatency.Update(requestLatencyInMs)
	}
}

func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
	b.requestRate.Mark(1)
	if b.brokerRequestRate != nil {
		b.brokerRequestRate.Mark(1)
	}
	requestSize := int64(bytes)
	b.outgoingByteRate.Mark(requestSize)
	if b.brokerOutgoingByteRate != nil {
		b.brokerOutgoingByteRate.Mark(requestSize)
	}
	b.requestSize.Update(requestSize)
	if b.brokerRequestSize != nil {
		b.brokerRequestSize.Update(requestSize)
	}
}
749 vendor/github.com/Shopify/sarama/client.go generated vendored Normal file

@@ -0,0 +1,749 @@
package sarama

import (
	"math/rand"
	"sort"
	"sync"
	"time"
)

// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
// You MUST call Close() on a client to avoid leaks, it will not be garbage-collected
// automatically when it passes out of scope. It is safe to share a client amongst many
// users, however Kafka will process requests from a single client strictly in serial,
// so it is generally more efficient to use the default one client per producer/consumer.
type Client interface {
	// Config returns the Config struct of the client. This struct should not be
	// altered after it has been created.
	Config() *Config

	// Brokers returns the current set of active brokers as retrieved from cluster metadata.
	Brokers() []*Broker

	// Topics returns the set of available topics as retrieved from cluster metadata.
	Topics() ([]string, error)

	// Partitions returns the sorted list of all partition IDs for the given topic.
	Partitions(topic string) ([]int32, error)

	// WritablePartitions returns the sorted list of all writable partition IDs for
	// the given topic, where "writable" means "having a valid leader accepting
	// writes".
	WritablePartitions(topic string) ([]int32, error)

	// Leader returns the broker object that is the leader of the current
	// topic/partition, as determined by querying the cluster metadata.
	Leader(topic string, partitionID int32) (*Broker, error)

	// Replicas returns the set of all replica IDs for the given partition.
	Replicas(topic string, partitionID int32) ([]int32, error)

	// RefreshMetadata takes a list of topics and queries the cluster to refresh the
	// available metadata for those topics. If no topics are provided, it will refresh
	// metadata for all topics.
	RefreshMetadata(topics ...string) error

	// GetOffset queries the cluster to get the most recent available offset at the
	// given time on the topic/partition combination. Time should be OffsetOldest for
	// the earliest available offset, OffsetNewest for the offset of the message that
	// will be produced next, or a time.
	GetOffset(topic string, partitionID int32, time int64) (int64, error)

	// Coordinator returns the coordinating broker for a consumer group. It will
	// return a locally cached value if it's available. You can call
	// RefreshCoordinator to update the cached value. This function only works on
	// Kafka 0.8.2 and higher.
	Coordinator(consumerGroup string) (*Broker, error)

	// RefreshCoordinator retrieves the coordinator for a consumer group and stores it
	// in local cache. This function only works on Kafka 0.8.2 and higher.
	RefreshCoordinator(consumerGroup string) error

	// Close shuts down all broker connections managed by this client. It is required
	// to call this function before a client object passes out of scope, as it will
	// otherwise leak memory. You must close any Producers or Consumers using a client
	// before you close the client.
	Close() error

	// Closed returns true if the client has already had Close called on it
	Closed() bool
}

const (
	// OffsetNewest stands for the log head offset, i.e. the offset that will be
	// assigned to the next message that will be produced to the partition. You
	// can send this to a client's GetOffset method to get this offset, or when
	// calling ConsumePartition to start consuming new messages.
	OffsetNewest int64 = -1
	// OffsetOldest stands for the oldest offset available on the broker for a
	// partition. You can send this to a client's GetOffset method to get this
	// offset, or when calling ConsumePartition to start consuming from the
	// oldest offset that is still available on the broker.
	OffsetOldest int64 = -2
)

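// Illustrative sketch, not part of upstream sarama: using the sentinel
// offsets above with GetOffset to size a partition's backlog. The topic name
// and partition are hypothetical.
//
//	oldest, err := client.GetOffset("my-topic", 0, OffsetOldest)
//	if err != nil {
//		return err
//	}
//	newest, err := client.GetOffset("my-topic", 0, OffsetNewest)
//	if err != nil {
//		return err
//	}
//	backlog := newest - oldest // messages currently retained on the broker
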
type client struct {
	conf           *Config
	closer, closed chan none // for shutting down background metadata updater

	// the broker addresses given to us through the constructor are not guaranteed to be returned in
	// the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
	// so we store them separately
	seedBrokers []*Broker
	deadSeeds   []*Broker

	brokers      map[int32]*Broker                       // maps broker ids to brokers
	metadata     map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
	coordinators map[string]int32                        // Maps consumer group names to coordinating broker IDs

	// If the number of partitions is large, we can get some churn calling cachedPartitions,
	// so the result is cached. It is important to update this value whenever metadata is changed
	cachedPartitionsResults map[string][maxPartitionIndex][]int32

	lock sync.RWMutex // protects access to the maps that hold cluster state.
}

// NewClient creates a new Client. It connects to one of the given broker addresses
// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
// be retrieved from any of the given broker addresses, the client is not created.
func NewClient(addrs []string, conf *Config) (Client, error) {
	Logger.Println("Initializing new client")

	if conf == nil {
		conf = NewConfig()
	}

	if err := conf.Validate(); err != nil {
		return nil, err
	}

	if len(addrs) < 1 {
		return nil, ConfigurationError("You must provide at least one broker address")
	}

	client := &client{
		conf:                    conf,
		closer:                  make(chan none),
		closed:                  make(chan none),
		brokers:                 make(map[int32]*Broker),
		metadata:                make(map[string]map[int32]*PartitionMetadata),
		cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
		coordinators:            make(map[string]int32),
	}

	random := rand.New(rand.NewSource(time.Now().UnixNano()))
	for _, index := range random.Perm(len(addrs)) {
		client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
	}

	// do an initial fetch of all cluster metadata by specifying an empty list of topics
	err := client.RefreshMetadata()
	switch err {
	case nil:
		break
	case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
		// indicates that maybe part of the cluster is down, but is not fatal to creating the client
		Logger.Println(err)
	default:
		close(client.closed) // we haven't started the background updater yet, so we have to do this manually
		_ = client.Close()
		return nil, err
	}
	go withRecover(client.backgroundMetadataUpdater)

	Logger.Println("Successfully initialized new client")

	return client, nil
}

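// Illustrative sketch, not part of upstream sarama: a typical client
// lifecycle against the interface above. Addresses are hypothetical; per the
// Client docs, Close must be called explicitly.
//
//	client, err := NewClient([]string{"kafka-1:9092", "kafka-2:9092"}, nil)
//	if err != nil {
//		return err
//	}
//	defer func() { _ = client.Close() }()
//	topics, err := client.Topics()
//	if err != nil {
//		return err
//	}
//	_ = topics // topic names known to the cluster
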
func (client *client) Config() *Config {
	return client.conf
}

func (client *client) Brokers() []*Broker {
	client.lock.RLock()
	defer client.lock.RUnlock()
	brokers := make([]*Broker, 0)
	for _, broker := range client.brokers {
		brokers = append(brokers, broker)
	}
	return brokers
}

func (client *client) Close() error {
	if client.Closed() {
		// Chances are this is being called from a defer() and the error will go unobserved
		// so we go ahead and log the event in this case.
		Logger.Printf("Close() called on already closed client")
		return ErrClosedClient
	}

	// shutdown and wait for the background thread before we take the lock, to avoid races
	close(client.closer)
	<-client.closed

	client.lock.Lock()
	defer client.lock.Unlock()
	Logger.Println("Closing Client")

	for _, broker := range client.brokers {
		safeAsyncClose(broker)
	}

	for _, broker := range client.seedBrokers {
		safeAsyncClose(broker)
	}

	client.brokers = nil
	client.metadata = nil

	return nil
}

func (client *client) Closed() bool {
	return client.brokers == nil
}

func (client *client) Topics() ([]string, error) {
	if client.Closed() {
		return nil, ErrClosedClient
	}

	client.lock.RLock()
	defer client.lock.RUnlock()

	ret := make([]string, 0, len(client.metadata))
	for topic := range client.metadata {
		ret = append(ret, topic)
	}

	return ret, nil
}

func (client *client) Partitions(topic string) ([]int32, error) {
	if client.Closed() {
		return nil, ErrClosedClient
	}

	partitions := client.cachedPartitions(topic, allPartitions)

	if len(partitions) == 0 {
		err := client.RefreshMetadata(topic)
		if err != nil {
			return nil, err
		}
		partitions = client.cachedPartitions(topic, allPartitions)
	}

	if partitions == nil {
		return nil, ErrUnknownTopicOrPartition
	}

	return partitions, nil
}

func (client *client) WritablePartitions(topic string) ([]int32, error) {
	if client.Closed() {
		return nil, ErrClosedClient
	}

	partitions := client.cachedPartitions(topic, writablePartitions)

	// len==0 catches when it's nil (no such topic) and the odd case when every single
	// partition is undergoing leader election simultaneously. Callers have to be able to handle
	// this function returning an empty slice (which is a valid return value) but catching it
	// here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers
	// a metadata refresh as a nicety so callers can just try again and don't have to manually
	// trigger a refresh (otherwise they'd just keep getting a stale cached copy).
	if len(partitions) == 0 {
		err := client.RefreshMetadata(topic)
		if err != nil {
			return nil, err
		}
		partitions = client.cachedPartitions(topic, writablePartitions)
	}

	if partitions == nil {
		return nil, ErrUnknownTopicOrPartition
	}

	return partitions, nil
}

func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
	if client.Closed() {
		return nil, ErrClosedClient
	}

	metadata := client.cachedMetadata(topic, partitionID)

	if metadata == nil {
		err := client.RefreshMetadata(topic)
		if err != nil {
			return nil, err
		}
		metadata = client.cachedMetadata(topic, partitionID)
	}

	if metadata == nil {
		return nil, ErrUnknownTopicOrPartition
	}

	if metadata.Err == ErrReplicaNotAvailable {
		return nil, metadata.Err
	}
	return dupeAndSort(metadata.Replicas), nil
}

func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
	if client.Closed() {
		return nil, ErrClosedClient
	}

	leader, err := client.cachedLeader(topic, partitionID)

	if leader == nil {
		err = client.RefreshMetadata(topic)
		if err != nil {
			return nil, err
		}
		leader, err = client.cachedLeader(topic, partitionID)
	}

	return leader, err
}

func (client *client) RefreshMetadata(topics ...string) error {
	if client.Closed() {
		return ErrClosedClient
	}

	// Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper
	// error. This handles the case by returning an error instead of sending it
	// off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
	for _, topic := range topics {
		if len(topic) == 0 {
			return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
		}
	}

	return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
}

func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
	if client.Closed() {
		return -1, ErrClosedClient
	}

	offset, err := client.getOffset(topic, partitionID, time)

	if err != nil {
		if err := client.RefreshMetadata(topic); err != nil {
			return -1, err
		}
		return client.getOffset(topic, partitionID, time)
	}

	return offset, err
}

func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
	if client.Closed() {
		return nil, ErrClosedClient
	}

	coordinator := client.cachedCoordinator(consumerGroup)

	if coordinator == nil {
		if err := client.RefreshCoordinator(consumerGroup); err != nil {
			return nil, err
		}
		coordinator = client.cachedCoordinator(consumerGroup)
	}

	if coordinator == nil {
		return nil, ErrConsumerCoordinatorNotAvailable
	}

	_ = coordinator.Open(client.conf)
	return coordinator, nil
}

func (client *client) RefreshCoordinator(consumerGroup string) error {
	if client.Closed() {
		return ErrClosedClient
	}

	response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
	if err != nil {
		return err
	}

	client.lock.Lock()
	defer client.lock.Unlock()
	client.registerBroker(response.Coordinator)
	client.coordinators[consumerGroup] = response.Coordinator.ID()
	return nil
}

// private broker management helpers

// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
// in the brokers map. If a broker with the same ID but a different address is already registered,
// it is replaced. You must hold the write lock before calling this function.
func (client *client) registerBroker(broker *Broker) {
	if client.brokers[broker.ID()] == nil {
		client.brokers[broker.ID()] = broker
		Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
	} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
		safeAsyncClose(client.brokers[broker.ID()])
		client.brokers[broker.ID()] = broker
		Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
	}
}

// deregisterBroker removes a broker from the seedBrokers list, and if it's
// not a seed broker, removes it from the brokers map completely.
func (client *client) deregisterBroker(broker *Broker) {
	client.lock.Lock()
	defer client.lock.Unlock()

	if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
		client.deadSeeds = append(client.deadSeeds, broker)
		client.seedBrokers = client.seedBrokers[1:]
	} else {
		// we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
		// but we really shouldn't have to; once that loop is made better this case can be
		// removed, and the function generally can be renamed from `deregisterBroker` to
		// `nextSeedBroker` or something
		Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
		delete(client.brokers, broker.ID())
	}
}

func (client *client) resurrectDeadBrokers() {
	client.lock.Lock()
	defer client.lock.Unlock()

	Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
	client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
	client.deadSeeds = nil
}

func (client *client) any() *Broker {
	client.lock.RLock()
	defer client.lock.RUnlock()

	if len(client.seedBrokers) > 0 {
		_ = client.seedBrokers[0].Open(client.conf)
		return client.seedBrokers[0]
	}

	// not guaranteed to be random *or* deterministic
	for _, broker := range client.brokers {
		_ = broker.Open(client.conf)
		return broker
	}

	return nil
}

// private caching/lazy metadata helpers

type partitionType int

const (
	allPartitions partitionType = iota
	writablePartitions
	// If you add any more types, update the partition cache in update()

	// Ensure this is the last partition type value
	maxPartitionIndex
)

func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
	client.lock.RLock()
	defer client.lock.RUnlock()

	partitions := client.metadata[topic]
	if partitions != nil {
		return partitions[partitionID]
	}

	return nil
}

func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
	client.lock.RLock()
	defer client.lock.RUnlock()

	partitions, exists := client.cachedPartitionsResults[topic]

	if !exists {
		return nil
	}
	return partitions[partitionSet]
}

func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
	partitions := client.metadata[topic]

	if partitions == nil {
		return nil
	}

	ret := make([]int32, 0, len(partitions))
	for _, partition := range partitions {
		if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
			continue
		}
		ret = append(ret, partition.ID)
	}

	sort.Sort(int32Slice(ret))
	return ret
}

func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
	client.lock.RLock()
	defer client.lock.RUnlock()

	partitions := client.metadata[topic]
	if partitions != nil {
		metadata, ok := partitions[partitionID]
		if ok {
			if metadata.Err == ErrLeaderNotAvailable {
				return nil, ErrLeaderNotAvailable
			}
			b := client.brokers[metadata.Leader]
			if b == nil {
				return nil, ErrLeaderNotAvailable
			}
			_ = b.Open(client.conf)
			return b, nil
		}
	}

	return nil, ErrUnknownTopicOrPartition
}

func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
	broker, err := client.Leader(topic, partitionID)
	if err != nil {
		return -1, err
	}

	request := &OffsetRequest{}
	if client.conf.Version.IsAtLeast(V0_10_1_0) {
		request.Version = 1
	}
	request.AddBlock(topic, partitionID, time, 1)

	response, err := broker.GetAvailableOffsets(request)
	if err != nil {
		_ = broker.Close()
		return -1, err
	}

	block := response.GetBlock(topic, partitionID)
	if block == nil {
		_ = broker.Close()
		return -1, ErrIncompleteResponse
	}
	if block.Err != ErrNoError {
		return -1, block.Err
	}
	if len(block.Offsets) != 1 {
		return -1, ErrOffsetOutOfRange
	}

	return block.Offsets[0], nil
}

// core metadata update logic

func (client *client) backgroundMetadataUpdater() {
	defer close(client.closed)

	if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
		return
	}

	ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			if err := client.RefreshMetadata(); err != nil {
				Logger.Println("Client background metadata update:", err)
			}
		case <-client.closer:
			return
		}
	}
}

func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
|
||||
retry := func(err error) error {
|
||||
if attemptsRemaining > 0 {
|
||||
Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
|
||||
time.Sleep(client.conf.Metadata.Retry.Backoff)
|
||||
return client.tryRefreshMetadata(topics, attemptsRemaining-1)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for broker := client.any(); broker != nil; broker = client.any() {
|
||||
if len(topics) > 0 {
|
||||
Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
|
||||
} else {
|
||||
Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
|
||||
}
|
||||
response, err := broker.GetMetadata(&MetadataRequest{Topics: topics})
|
||||
|
||||
switch err.(type) {
|
||||
case nil:
|
||||
// valid response, use it
|
||||
if shouldRetry, err := client.updateMetadata(response); shouldRetry {
|
||||
Logger.Println("client/metadata found some partitions to be leaderless")
|
||||
return retry(err) // note: err can be nil
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
|
||||
case PacketEncodingError:
|
||||
// didn't even send, return the error
|
||||
return err
|
||||
default:
|
||||
// some other error, remove that broker and try again
|
||||
Logger.Println("client/metadata got error from broker while fetching metadata:", err)
|
||||
_ = broker.Close()
|
||||
client.deregisterBroker(broker)
|
||||
}
|
||||
}
|
||||
|
||||
Logger.Println("client/metadata no available broker to send metadata request to")
|
||||
client.resurrectDeadBrokers()
|
||||
return retry(ErrOutOfBrokers)
|
||||
}
|
||||
|
||||
// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
|
||||
func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) {
|
||||
client.lock.Lock()
|
||||
defer client.lock.Unlock()
|
||||
|
||||
// For all the brokers we received:
|
||||
// - if it is a new ID, save it
|
||||
// - if it is an existing ID, but the address we have is stale, discard the old one and save it
|
||||
// - otherwise ignore it, replacing our existing one would just bounce the connection
|
||||
for _, broker := range data.Brokers {
|
||||
client.registerBroker(broker)
|
||||
}
|
||||
|
||||
for _, topic := range data.Topics {
|
||||
delete(client.metadata, topic.Name)
|
||||
delete(client.cachedPartitionsResults, topic.Name)
|
||||
|
||||
switch topic.Err {
|
||||
case ErrNoError:
|
||||
break
|
||||
case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
|
||||
err = topic.Err
|
||||
continue
|
||||
case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
|
||||
err = topic.Err
|
||||
retry = true
|
||||
continue
|
||||
case ErrLeaderNotAvailable: // retry, but store partial partition results
|
||||
retry = true
|
||||
break
|
||||
default: // don't retry, don't store partial results
|
||||
Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
|
||||
err = topic.Err
|
||||
continue
|
||||
}
|
||||
|
||||
client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
|
||||
for _, partition := range topic.Partitions {
|
||||
client.metadata[topic.Name][partition.ID] = partition
|
||||
if partition.Err == ErrLeaderNotAvailable {
|
||||
retry = true
|
||||
}
|
||||
}
|
||||
|
||||
var partitionCache [maxPartitionIndex][]int32
|
||||
partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
|
||||
partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
|
||||
client.cachedPartitionsResults[topic.Name] = partitionCache
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (client *client) cachedCoordinator(consumerGroup string) *Broker {
|
||||
client.lock.RLock()
|
||||
defer client.lock.RUnlock()
|
||||
if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
|
||||
return client.brokers[coordinatorID]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) {
|
||||
retry := func(err error) (*ConsumerMetadataResponse, error) {
|
||||
if attemptsRemaining > 0 {
|
||||
Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
|
||||
time.Sleep(client.conf.Metadata.Retry.Backoff)
|
||||
return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for broker := client.any(); broker != nil; broker = client.any() {
|
||||
Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
|
||||
|
||||
request := new(ConsumerMetadataRequest)
|
||||
request.ConsumerGroup = consumerGroup
|
||||
|
||||
response, err := broker.GetConsumerMetadata(request)
|
||||
|
||||
if err != nil {
|
||||
Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
|
||||
|
||||
switch err.(type) {
|
||||
case PacketEncodingError:
|
||||
return nil, err
|
||||
default:
|
||||
_ = broker.Close()
|
||||
client.deregisterBroker(broker)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
switch response.Err {
|
||||
case ErrNoError:
|
||||
Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
|
||||
return response, nil
|
||||
|
||||
case ErrConsumerCoordinatorNotAvailable:
|
||||
Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
|
||||
|
||||
// This is very ugly, but this scenario will only happen once per cluster.
|
||||
// The __consumer_offsets topic only has to be created one time.
|
||||
// The number of partitions not configurable, but partition 0 should always exist.
|
||||
if _, err := client.Leader("__consumer_offsets", 0); err != nil {
|
||||
Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
|
||||
time.Sleep(2 * time.Second)
|
||||
}
|
||||
|
||||
return retry(ErrConsumerCoordinatorNotAvailable)
|
||||
default:
|
||||
return nil, response.Err
|
||||
}
|
||||
}
|
||||
|
||||
Logger.Println("client/coordinator no available broker to send consumer metadata request to")
|
||||
client.resurrectDeadBrokers()
|
||||
return retry(ErrOutOfBrokers)
|
||||
}
|
||||
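For orientation (not part of the vendored diff): the unexported helpers above back the exported Client interface. A minimal sketch of how a caller might exercise the leader-lookup and offset paths; the broker address and topic name are placeholders.

package main

import (
    "log"

    "github.com/Shopify/sarama"
)

func main() {
    // Placeholder broker address and topic, for illustration only.
    client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
    if err != nil {
        log.Fatalln("new client:", err)
    }
    defer client.Close()

    // Leader consults the cached metadata maintained by the code above,
    // refreshing it (with Metadata.Retry backoff) on a cache miss.
    broker, err := client.Leader("my-topic", 0)
    if err != nil {
        log.Fatalln("leader:", err)
    }
    log.Printf("partition 0 is led by broker #%d at %s", broker.ID(), broker.Addr())

    // GetOffset goes through the same leader lookup and issues an OffsetRequest.
    offset, err := client.GetOffset("my-topic", 0, sarama.OffsetNewest)
    if err != nil {
        log.Fatalln("get offset:", err)
    }
    log.Printf("next offset for my-topic/0: %d", offset)
}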
417  vendor/github.com/Shopify/sarama/config.go  generated  vendored  Normal file
@@ -0,0 +1,417 @@
package sarama

import (
    "crypto/tls"
    "regexp"
    "time"

    "github.com/rcrowley/go-metrics"
)

const defaultClientID = "sarama"

var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)

// Config is used to pass multiple configuration options to Sarama's constructors.
type Config struct {
    // Net is the namespace for network-level properties used by the Broker, and
    // shared by the Client/Producer/Consumer.
    Net struct {
        // How many outstanding requests a connection is allowed to have before
        // sending on it blocks (default 5).
        MaxOpenRequests int

        // All three of the below configurations are similar to the
        // `socket.timeout.ms` setting in JVM kafka. All of them default
        // to 30 seconds.
        DialTimeout  time.Duration // How long to wait for the initial connection.
        ReadTimeout  time.Duration // How long to wait for a response.
        WriteTimeout time.Duration // How long to wait for a transmit.

        TLS struct {
            // Whether or not to use TLS when connecting to the broker
            // (defaults to false).
            Enable bool
            // The TLS configuration to use for secure connections if
            // enabled (defaults to nil).
            Config *tls.Config
        }

        // SASL based authentication with broker. While there are multiple SASL authentication methods
        // the current implementation is limited to plaintext (SASL/PLAIN) authentication
        SASL struct {
            // Whether or not to use SASL authentication when connecting to the broker
            // (defaults to false).
            Enable bool
            // Whether or not to send the Kafka SASL handshake first if enabled
            // (defaults to true). You should only set this to false if you're using
            // a non-Kafka SASL proxy.
            Handshake bool
            // username and password for SASL/PLAIN authentication
            User     string
            Password string
        }

        // KeepAlive specifies the keep-alive period for an active network connection.
        // If zero, keep-alives are disabled. (default is 0: disabled).
        KeepAlive time.Duration
    }

    // Metadata is the namespace for metadata management properties used by the
    // Client, and shared by the Producer/Consumer.
    Metadata struct {
        Retry struct {
            // The total number of times to retry a metadata request when the
            // cluster is in the middle of a leader election (default 3).
            Max int
            // How long to wait for leader election to occur before retrying
            // (default 250ms). Similar to the JVM's `retry.backoff.ms`.
            Backoff time.Duration
        }
        // How frequently to refresh the cluster metadata in the background.
        // Defaults to 10 minutes. Set to 0 to disable. Similar to
        // `topic.metadata.refresh.interval.ms` in the JVM version.
        RefreshFrequency time.Duration
    }

    // Producer is the namespace for configuration related to producing messages,
    // used by the Producer.
    Producer struct {
        // The maximum permitted size of a message (defaults to 1000000). Should be
        // set equal to or smaller than the broker's `message.max.bytes`.
        MaxMessageBytes int
        // The level of acknowledgement reliability needed from the broker (defaults
        // to WaitForLocal). Equivalent to the `request.required.acks` setting of the
        // JVM producer.
        RequiredAcks RequiredAcks
        // The maximum duration the broker will wait for the receipt of the number of
        // RequiredAcks (defaults to 10 seconds). This is only relevant when
        // RequiredAcks is set to WaitForAll or a number > 1. Only supports
        // millisecond resolution, nanoseconds will be truncated. Equivalent to
        // the JVM producer's `request.timeout.ms` setting.
        Timeout time.Duration
        // The type of compression to use on messages (defaults to no compression).
        // Similar to `compression.codec` setting of the JVM producer.
        Compression CompressionCodec
        // Generates partitioners for choosing the partition to send messages to
        // (defaults to hashing the message key). Similar to the `partitioner.class`
        // setting for the JVM producer.
        Partitioner PartitionerConstructor

        // Return specifies what channels will be populated. If they are set to true,
        // you must read from the respective channels to prevent deadlock.
        Return struct {
            // If enabled, successfully delivered messages will be returned on the
            // Successes channel (default disabled).
            Successes bool

            // If enabled, messages that failed to deliver will be returned on the
            // Errors channel, including error (default enabled).
            Errors bool
        }

        // The following config options control how often messages are batched up and
        // sent to the broker. By default, messages are sent as fast as possible, and
        // all messages received while the current batch is in-flight are placed
        // into the subsequent batch.
        Flush struct {
            // The best-effort number of bytes needed to trigger a flush. Use the
            // global sarama.MaxRequestSize to set a hard upper limit.
            Bytes int
            // The best-effort number of messages needed to trigger a flush. Use
            // `MaxMessages` to set a hard upper limit.
            Messages int
            // The best-effort frequency of flushes. Equivalent to
            // `queue.buffering.max.ms` setting of JVM producer.
            Frequency time.Duration
            // The maximum number of messages the producer will send in a single
            // broker request. Defaults to 0 for unlimited. Similar to
            // `queue.buffering.max.messages` in the JVM producer.
            MaxMessages int
        }

        Retry struct {
            // The total number of times to retry sending a message (default 3).
            // Similar to the `message.send.max.retries` setting of the JVM producer.
            Max int
            // How long to wait for the cluster to settle between retries
            // (default 100ms). Similar to the `retry.backoff.ms` setting of the
            // JVM producer.
            Backoff time.Duration
        }
    }

    // Consumer is the namespace for configuration related to consuming messages,
    // used by the Consumer.
    //
    // Note that Sarama's Consumer type does not currently support automatic
    // consumer-group rebalancing and offset tracking. For Zookeeper-based
    // tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka
    // library builds on Sarama to add this support. For Kafka-based tracking
    // (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library
    // builds on Sarama to add this support.
    Consumer struct {
        Retry struct {
            // How long to wait after a failure to read from a partition before
            // trying again (default 2s).
            Backoff time.Duration
        }

        // Fetch is the namespace for controlling how many bytes are retrieved by any
        // given request.
        Fetch struct {
            // The minimum number of message bytes to fetch in a request - the broker
            // will wait until at least this many are available. The default is 1,
            // as 0 causes the consumer to spin when no messages are available.
            // Equivalent to the JVM's `fetch.min.bytes`.
            Min int32
            // The default number of message bytes to fetch from the broker in each
            // request (default 32768). This should be larger than the majority of
            // your messages, or else the consumer will spend a lot of time
            // negotiating sizes and not actually consuming. Similar to the JVM's
            // `fetch.message.max.bytes`.
            Default int32
            // The maximum number of message bytes to fetch from the broker in a
            // single request. Messages larger than this will return
            // ErrMessageTooLarge and will not be consumable, so you must be sure
            // this is at least as large as your largest message. Defaults to 0
            // (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
            // global `sarama.MaxResponseSize` still applies.
            Max int32
        }
        // The maximum amount of time the broker will wait for Consumer.Fetch.Min
        // bytes to become available before it returns fewer than that anyways. The
        // default is 250ms, since 0 causes the consumer to spin when no events are
        // available. 100-500ms is a reasonable range for most cases. Kafka only
        // supports precision up to milliseconds; nanoseconds will be truncated.
        // Equivalent to the JVM's `fetch.wait.max.ms`.
        MaxWaitTime time.Duration

        // The maximum amount of time the consumer expects a message takes to process
        // for the user. If writing to the Messages channel takes longer than this,
        // that partition will stop fetching more messages until it can proceed again.
        // Note that, since the Messages channel is buffered, the actual grace time is
        // (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
        MaxProcessingTime time.Duration

        // Return specifies what channels will be populated. If they are set to true,
        // you must read from them to prevent deadlock.
        Return struct {
            // If enabled, any errors that occurred while consuming are returned on
            // the Errors channel (default disabled).
            Errors bool
        }

        // Offsets specifies configuration for how and when to commit consumed
        // offsets. This currently requires the manual use of an OffsetManager
        // but will eventually be automated.
        Offsets struct {
            // How frequently to commit updated offsets. Defaults to 1s.
            CommitInterval time.Duration

            // The initial offset to use if no offset was previously committed.
            // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
            Initial int64

            // The retention duration for committed offsets. If zero, disabled
            // (in which case the `offsets.retention.minutes` option on the
            // broker will be used). Kafka only supports precision up to
            // milliseconds; nanoseconds will be truncated. Requires Kafka
            // broker version 0.9.0 or later.
            // (default is 0: disabled).
            Retention time.Duration
        }
    }

    // A user-provided string sent with every request to the brokers for logging,
    // debugging, and auditing purposes. Defaults to "sarama", but you should
    // probably set it to something specific to your application.
    ClientID string
    // The number of events to buffer in internal and external channels. This
    // permits the producer and consumer to continue processing some messages
    // in the background while user code is working, greatly improving throughput.
    // Defaults to 256.
    ChannelBufferSize int
    // The version of Kafka that Sarama will assume it is running against.
    // Defaults to the oldest supported stable version. Since Kafka provides
    // backwards-compatibility, setting it to a version older than you have
    // will not break anything, although it may prevent you from using the
    // latest features. Setting it to a version greater than you are actually
    // running may lead to random breakage.
    Version KafkaVersion
    // The registry to define metrics into.
    // Defaults to a local registry.
    // If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
    // prior to starting Sarama.
    // See Examples on how to use the metrics registry
    MetricRegistry metrics.Registry
}

// NewConfig returns a new configuration instance with sane defaults.
func NewConfig() *Config {
    c := &Config{}

    c.Net.MaxOpenRequests = 5
    c.Net.DialTimeout = 30 * time.Second
    c.Net.ReadTimeout = 30 * time.Second
    c.Net.WriteTimeout = 30 * time.Second
    c.Net.SASL.Handshake = true

    c.Metadata.Retry.Max = 3
    c.Metadata.Retry.Backoff = 250 * time.Millisecond
    c.Metadata.RefreshFrequency = 10 * time.Minute

    c.Producer.MaxMessageBytes = 1000000
    c.Producer.RequiredAcks = WaitForLocal
    c.Producer.Timeout = 10 * time.Second
    c.Producer.Partitioner = NewHashPartitioner
    c.Producer.Retry.Max = 3
    c.Producer.Retry.Backoff = 100 * time.Millisecond
    c.Producer.Return.Errors = true

    c.Consumer.Fetch.Min = 1
    c.Consumer.Fetch.Default = 32768
    c.Consumer.Retry.Backoff = 2 * time.Second
    c.Consumer.MaxWaitTime = 250 * time.Millisecond
    c.Consumer.MaxProcessingTime = 100 * time.Millisecond
    c.Consumer.Return.Errors = false
    c.Consumer.Offsets.CommitInterval = 1 * time.Second
    c.Consumer.Offsets.Initial = OffsetNewest

    c.ClientID = defaultClientID
    c.ChannelBufferSize = 256
    c.Version = minVersion
    c.MetricRegistry = metrics.NewRegistry()

    return c
}

// Validate checks a Config instance. It will return a
// ConfigurationError if the specified values don't make sense.
func (c *Config) Validate() error {
    // some configuration values should be warned on but not fail completely, do those first
    if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil {
        Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
    }
    if c.Net.SASL.Enable == false {
        if c.Net.SASL.User != "" {
            Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
        }
        if c.Net.SASL.Password != "" {
            Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
        }
    }
    if c.Producer.RequiredAcks > 1 {
        Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
    }
    if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
        Logger.Println("Producer.MaxMessageBytes is larger than MaxRequestSize; it will be ignored.")
    }
    if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
        Logger.Println("Producer.Flush.Bytes is larger than MaxRequestSize; it will be ignored.")
    }
    if c.Producer.Timeout%time.Millisecond != 0 {
        Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
    }
    if c.Consumer.MaxWaitTime < 100*time.Millisecond {
        Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
    }
    if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
        Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
    }
    if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
        Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
    }
    if c.ClientID == defaultClientID {
        Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
    }

    // validate Net values
    switch {
    case c.Net.MaxOpenRequests <= 0:
        return ConfigurationError("Net.MaxOpenRequests must be > 0")
    case c.Net.DialTimeout <= 0:
        return ConfigurationError("Net.DialTimeout must be > 0")
    case c.Net.ReadTimeout <= 0:
        return ConfigurationError("Net.ReadTimeout must be > 0")
    case c.Net.WriteTimeout <= 0:
        return ConfigurationError("Net.WriteTimeout must be > 0")
    case c.Net.KeepAlive < 0:
        return ConfigurationError("Net.KeepAlive must be >= 0")
    case c.Net.SASL.Enable == true && c.Net.SASL.User == "":
        return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
    case c.Net.SASL.Enable == true && c.Net.SASL.Password == "":
        return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
    }

    // validate the Metadata values
    switch {
    case c.Metadata.Retry.Max < 0:
        return ConfigurationError("Metadata.Retry.Max must be >= 0")
    case c.Metadata.Retry.Backoff < 0:
        return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
    case c.Metadata.RefreshFrequency < 0:
        return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
    }

    // validate the Producer values
    switch {
    case c.Producer.MaxMessageBytes <= 0:
        return ConfigurationError("Producer.MaxMessageBytes must be > 0")
    case c.Producer.RequiredAcks < -1:
        return ConfigurationError("Producer.RequiredAcks must be >= -1")
    case c.Producer.Timeout <= 0:
        return ConfigurationError("Producer.Timeout must be > 0")
    case c.Producer.Partitioner == nil:
        return ConfigurationError("Producer.Partitioner must not be nil")
    case c.Producer.Flush.Bytes < 0:
        return ConfigurationError("Producer.Flush.Bytes must be >= 0")
    case c.Producer.Flush.Messages < 0:
        return ConfigurationError("Producer.Flush.Messages must be >= 0")
    case c.Producer.Flush.Frequency < 0:
        return ConfigurationError("Producer.Flush.Frequency must be >= 0")
    case c.Producer.Flush.MaxMessages < 0:
        return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
    case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
        return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
    case c.Producer.Retry.Max < 0:
        return ConfigurationError("Producer.Retry.Max must be >= 0")
    case c.Producer.Retry.Backoff < 0:
        return ConfigurationError("Producer.Retry.Backoff must be >= 0")
    }

    if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
        return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
    }

    // validate the Consumer values
    switch {
    case c.Consumer.Fetch.Min <= 0:
        return ConfigurationError("Consumer.Fetch.Min must be > 0")
    case c.Consumer.Fetch.Default <= 0:
        return ConfigurationError("Consumer.Fetch.Default must be > 0")
    case c.Consumer.Fetch.Max < 0:
        return ConfigurationError("Consumer.Fetch.Max must be >= 0")
    case c.Consumer.MaxWaitTime < 1*time.Millisecond:
        return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
    case c.Consumer.MaxProcessingTime <= 0:
        return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
    case c.Consumer.Retry.Backoff < 0:
        return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
    case c.Consumer.Offsets.CommitInterval <= 0:
        return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
    case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
        return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
    }

    // validate misc shared values
    switch {
    case c.ChannelBufferSize < 0:
        return ConfigurationError("ChannelBufferSize must be >= 0")
    case !validID.MatchString(c.ClientID):
        return ConfigurationError("ClientID is invalid")
    }

    return nil
}
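For orientation (not part of the vendored diff): a minimal sketch of constructing this configuration, overriding a few of the documented fields, and running Validate before use. The client ID and values chosen here are illustrative.

package main

import (
    "log"
    "time"

    "github.com/Shopify/sarama"
)

func main() {
    cfg := sarama.NewConfig()

    // Override a few of the defaults documented above.
    cfg.ClientID = "my-service" // avoid the default "sarama"
    cfg.Net.DialTimeout = 10 * time.Second
    cfg.Metadata.RefreshFrequency = 5 * time.Minute
    cfg.Consumer.Return.Errors = true // then read the Errors channel to prevent deadlock

    // Validate catches out-of-range values before any connection is made.
    if err := cfg.Validate(); err != nil {
        log.Fatalln("invalid config:", err)
    }
}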
735  vendor/github.com/Shopify/sarama/consumer.go  generated  vendored  Normal file
@@ -0,0 +1,735 @@
package sarama

import (
    "errors"
    "fmt"
    "sync"
    "sync/atomic"
    "time"
)

// ConsumerMessage encapsulates a Kafka message returned by the consumer.
type ConsumerMessage struct {
    Key, Value []byte
    Topic      string
    Partition  int32
    Offset     int64
    Timestamp  time.Time // only set if kafka is version 0.10+
}

// ConsumerError is what is provided to the user when an error occurs.
// It wraps an error and includes the topic and partition.
type ConsumerError struct {
    Topic     string
    Partition int32
    Err       error
}

func (ce ConsumerError) Error() string {
    return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
}

// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
// when stopping.
type ConsumerErrors []*ConsumerError

func (ce ConsumerErrors) Error() string {
    return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
}

// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of
// scope.
//
// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking.
// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library
// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the
// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
type Consumer interface {

    // Topics returns the set of available topics as retrieved from the cluster
    // metadata. This method is the same as Client.Topics(), and is provided for
    // convenience.
    Topics() ([]string, error)

    // Partitions returns the sorted list of all partition IDs for the given topic.
    // This method is the same as Client.Partitions(), and is provided for convenience.
    Partitions(topic string) ([]int32, error)

    // ConsumePartition creates a PartitionConsumer on the given topic/partition with
    // the given offset. It will return an error if this Consumer is already consuming
    // on the given topic/partition. Offset can be a literal offset, or OffsetNewest
    // or OffsetOldest.
    ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)

    // HighWaterMarks returns the current high water marks for each topic and partition.
    // Consistency between partitions is not guaranteed since high water marks are updated separately.
    HighWaterMarks() map[string]map[int32]int64

    // Close shuts down the consumer. It must be called after all child
    // PartitionConsumers have already been closed.
    Close() error
}

type consumer struct {
    client    Client
    conf      *Config
    ownClient bool

    lock            sync.Mutex
    children        map[string]map[int32]*partitionConsumer
    brokerConsumers map[*Broker]*brokerConsumer
}

// NewConsumer creates a new consumer using the given broker addresses and configuration.
func NewConsumer(addrs []string, config *Config) (Consumer, error) {
    client, err := NewClient(addrs, config)
    if err != nil {
        return nil, err
    }

    c, err := NewConsumerFromClient(client)
    if err != nil {
        return nil, err
    }
    c.(*consumer).ownClient = true
    return c, nil
}

// NewConsumerFromClient creates a new consumer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this consumer.
func NewConsumerFromClient(client Client) (Consumer, error) {
    // Check that we are not dealing with a closed Client before processing any other arguments
    if client.Closed() {
        return nil, ErrClosedClient
    }

    c := &consumer{
        client:          client,
        conf:            client.Config(),
        children:        make(map[string]map[int32]*partitionConsumer),
        brokerConsumers: make(map[*Broker]*brokerConsumer),
    }

    return c, nil
}

func (c *consumer) Close() error {
    if c.ownClient {
        return c.client.Close()
    }
    return nil
}

func (c *consumer) Topics() ([]string, error) {
    return c.client.Topics()
}

func (c *consumer) Partitions(topic string) ([]int32, error) {
    return c.client.Partitions(topic)
}

func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
    child := &partitionConsumer{
        consumer:  c,
        conf:      c.conf,
        topic:     topic,
        partition: partition,
        messages:  make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
        errors:    make(chan *ConsumerError, c.conf.ChannelBufferSize),
        feeder:    make(chan *FetchResponse, 1),
        trigger:   make(chan none, 1),
        dying:     make(chan none),
        fetchSize: c.conf.Consumer.Fetch.Default,
    }

    if err := child.chooseStartingOffset(offset); err != nil {
        return nil, err
    }

    var leader *Broker
    var err error
    if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
        return nil, err
    }

    if err := c.addChild(child); err != nil {
        return nil, err
    }

    go withRecover(child.dispatcher)
    go withRecover(child.responseFeeder)

    child.broker = c.refBrokerConsumer(leader)
    child.broker.input <- child

    return child, nil
}

func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
    c.lock.Lock()
    defer c.lock.Unlock()

    hwms := make(map[string]map[int32]int64)
    for topic, p := range c.children {
        hwm := make(map[int32]int64, len(p))
        for partition, pc := range p {
            hwm[partition] = pc.HighWaterMarkOffset()
        }
        hwms[topic] = hwm
    }

    return hwms
}

func (c *consumer) addChild(child *partitionConsumer) error {
    c.lock.Lock()
    defer c.lock.Unlock()

    topicChildren := c.children[child.topic]
    if topicChildren == nil {
        topicChildren = make(map[int32]*partitionConsumer)
        c.children[child.topic] = topicChildren
    }

    if topicChildren[child.partition] != nil {
        return ConfigurationError("That topic/partition is already being consumed")
    }

    topicChildren[child.partition] = child
    return nil
}

func (c *consumer) removeChild(child *partitionConsumer) {
    c.lock.Lock()
    defer c.lock.Unlock()

    delete(c.children[child.topic], child.partition)
}

func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
    c.lock.Lock()
    defer c.lock.Unlock()

    bc := c.brokerConsumers[broker]
    if bc == nil {
        bc = c.newBrokerConsumer(broker)
        c.brokerConsumers[broker] = bc
    }

    bc.refs++

    return bc
}

func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
    c.lock.Lock()
    defer c.lock.Unlock()

    brokerWorker.refs--

    if brokerWorker.refs == 0 {
        close(brokerWorker.input)
        if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
            delete(c.brokerConsumers, brokerWorker.broker)
        }
    }
}

func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
    c.lock.Lock()
    defer c.lock.Unlock()

    delete(c.brokerConsumers, brokerWorker.broker)
}

// PartitionConsumer

// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close()
// or AsyncClose() on a PartitionConsumer to avoid leaks, it will not be garbage-collected automatically
// when it passes out of scope.
//
// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
type PartitionConsumer interface {

    // AsyncClose initiates a shutdown of the PartitionConsumer. This method will
    // return immediately, after which you should wait until the 'messages' and
    // 'errors' channel are drained. It is required to call this function, or
    // Close before a consumer object passes out of scope, as it will otherwise
    // leak memory. You must call this before calling Close on the underlying client.
    AsyncClose()

    // Close stops the PartitionConsumer from fetching messages. It is required to
    // call this function (or AsyncClose) before a consumer object passes out of
    // scope, as it will otherwise leak memory. You must call this before calling
    // Close on the underlying client.
    Close() error

    // Messages returns the read channel for the messages that are returned by
    // the broker.
    Messages() <-chan *ConsumerMessage

    // Errors returns a read channel of errors that occurred during consuming, if
    // enabled. By default, errors are logged and not returned over this channel.
    // If you want to implement any custom error handling, set your config's
    // Consumer.Return.Errors setting to true, and read from this channel.
    Errors() <-chan *ConsumerError

    // HighWaterMarkOffset returns the high water mark offset of the partition,
    // i.e. the offset that will be used for the next message that will be produced.
    // You can use this to determine how far behind the processing is.
    HighWaterMarkOffset() int64
}

type partitionConsumer struct {
    consumer  *consumer
    conf      *Config
    topic     string
    partition int32

    broker   *brokerConsumer
    messages chan *ConsumerMessage
    errors   chan *ConsumerError
    feeder   chan *FetchResponse

    trigger, dying chan none
    responseResult error

    fetchSize           int32
    offset              int64
    highWaterMarkOffset int64
}

var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing

func (child *partitionConsumer) sendError(err error) {
    cErr := &ConsumerError{
        Topic:     child.topic,
        Partition: child.partition,
        Err:       err,
    }

    if child.conf.Consumer.Return.Errors {
        child.errors <- cErr
    } else {
        Logger.Println(cErr)
    }
}

func (child *partitionConsumer) dispatcher() {
    for range child.trigger {
        select {
        case <-child.dying:
            close(child.trigger)
        case <-time.After(child.conf.Consumer.Retry.Backoff):
            if child.broker != nil {
                child.consumer.unrefBrokerConsumer(child.broker)
                child.broker = nil
            }

            Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
            if err := child.dispatch(); err != nil {
                child.sendError(err)
                child.trigger <- none{}
            }
        }
    }

    if child.broker != nil {
        child.consumer.unrefBrokerConsumer(child.broker)
    }
    child.consumer.removeChild(child)
    close(child.feeder)
}

func (child *partitionConsumer) dispatch() error {
    if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
        return err
    }

    var leader *Broker
    var err error
    if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
        return err
    }

    child.broker = child.consumer.refBrokerConsumer(leader)

    child.broker.input <- child

    return nil
}

func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
    newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
    if err != nil {
        return err
    }
    oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
    if err != nil {
        return err
    }

    switch {
    case offset == OffsetNewest:
        child.offset = newestOffset
    case offset == OffsetOldest:
        child.offset = oldestOffset
    case offset >= oldestOffset && offset <= newestOffset:
        child.offset = offset
    default:
        return ErrOffsetOutOfRange
    }

    return nil
}

func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
    return child.messages
}

func (child *partitionConsumer) Errors() <-chan *ConsumerError {
    return child.errors
}

func (child *partitionConsumer) AsyncClose() {
    // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
    // the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
    // 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will
    // also just close itself)
    close(child.dying)
}

func (child *partitionConsumer) Close() error {
    child.AsyncClose()

    go withRecover(func() {
        for range child.messages {
            // drain
        }
    })

    var errors ConsumerErrors
    for err := range child.errors {
        errors = append(errors, err)
    }

    if len(errors) > 0 {
        return errors
    }
    return nil
}

func (child *partitionConsumer) HighWaterMarkOffset() int64 {
    return atomic.LoadInt64(&child.highWaterMarkOffset)
}

func (child *partitionConsumer) responseFeeder() {
    var msgs []*ConsumerMessage
    expiryTimer := time.NewTimer(child.conf.Consumer.MaxProcessingTime)
    expireTimedOut := false

feederLoop:
    for response := range child.feeder {
        msgs, child.responseResult = child.parseResponse(response)

        for i, msg := range msgs {
            if !expiryTimer.Stop() && !expireTimedOut {
                // expiryTimer was expired; clear out the waiting msg
                <-expiryTimer.C
            }
            expiryTimer.Reset(child.conf.Consumer.MaxProcessingTime)
            expireTimedOut = false

            select {
            case child.messages <- msg:
            case <-expiryTimer.C:
                expireTimedOut = true
                child.responseResult = errTimedOut
                child.broker.acks.Done()
                for _, msg = range msgs[i:] {
                    child.messages <- msg
                }
                child.broker.input <- child
                continue feederLoop
            }
        }

        child.broker.acks.Done()
    }

    close(child.messages)
    close(child.errors)
}

func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
    block := response.GetBlock(child.topic, child.partition)
    if block == nil {
        return nil, ErrIncompleteResponse
    }

    if block.Err != ErrNoError {
        return nil, block.Err
    }

    if len(block.MsgSet.Messages) == 0 {
        // We got no messages. If we got a trailing one then we need to ask for more data.
        // Otherwise we just poll again and wait for one to be produced...
        if block.MsgSet.PartialTrailingMessage {
            if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
                // we can't ask for more data, we've hit the configured limit
                child.sendError(ErrMessageTooLarge)
                child.offset++ // skip this one so we can keep processing future messages
            } else {
                child.fetchSize *= 2
                if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
                    child.fetchSize = child.conf.Consumer.Fetch.Max
                }
            }
        }

        return nil, nil
    }

    // we got messages, reset our fetch size in case it was increased for a previous request
    child.fetchSize = child.conf.Consumer.Fetch.Default
    atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)

    incomplete := false
    prelude := true
    var messages []*ConsumerMessage
    for _, msgBlock := range block.MsgSet.Messages {

        for _, msg := range msgBlock.Messages() {
            offset := msg.Offset
            if msg.Msg.Version >= 1 {
                baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
                offset += baseOffset
            }
            if prelude && offset < child.offset {
                continue
            }
            prelude = false

            if offset >= child.offset {
                messages = append(messages, &ConsumerMessage{
                    Topic:     child.topic,
                    Partition: child.partition,
                    Key:       msg.Msg.Key,
                    Value:     msg.Msg.Value,
                    Offset:    offset,
                    Timestamp: msg.Msg.Timestamp,
                })
                child.offset = offset + 1
            } else {
                incomplete = true
            }
        }

    }

    if incomplete || len(messages) == 0 {
        return nil, ErrIncompleteResponse
    }
    return messages, nil
}

// brokerConsumer

type brokerConsumer struct {
    consumer         *consumer
    broker           *Broker
    input            chan *partitionConsumer
    newSubscriptions chan []*partitionConsumer
    wait             chan none
    subscriptions    map[*partitionConsumer]none
    acks             sync.WaitGroup
    refs             int
}

func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
    bc := &brokerConsumer{
        consumer:         c,
        broker:           broker,
        input:            make(chan *partitionConsumer),
        newSubscriptions: make(chan []*partitionConsumer),
        wait:             make(chan none),
        subscriptions:    make(map[*partitionConsumer]none),
        refs:             0,
    }

    go withRecover(bc.subscriptionManager)
    go withRecover(bc.subscriptionConsumer)

    return bc
}

func (bc *brokerConsumer) subscriptionManager() {
    var buffer []*partitionConsumer

    // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
    // goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
    // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
    // it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
    // so the main goroutine can block waiting for work if it has none.
    for {
        if len(buffer) > 0 {
            select {
            case event, ok := <-bc.input:
                if !ok {
                    goto done
                }
                buffer = append(buffer, event)
            case bc.newSubscriptions <- buffer:
                buffer = nil
            case bc.wait <- none{}:
            }
        } else {
            select {
            case event, ok := <-bc.input:
                if !ok {
                    goto done
                }
                buffer = append(buffer, event)
            case bc.newSubscriptions <- nil:
            }
        }
    }

done:
    close(bc.wait)
    if len(buffer) > 0 {
        bc.newSubscriptions <- buffer
    }
    close(bc.newSubscriptions)
}

func (bc *brokerConsumer) subscriptionConsumer() {
    <-bc.wait // wait for our first piece of work

    // the subscriptionManager ensures we will get nil right away if no new subscriptions are available
    for newSubscriptions := range bc.newSubscriptions {
        bc.updateSubscriptions(newSubscriptions)

        if len(bc.subscriptions) == 0 {
            // We're about to be shut down or we're about to receive more subscriptions.
            // Either way, the signal just hasn't propagated to our goroutine yet.
            <-bc.wait
            continue
        }

        response, err := bc.fetchNewMessages()

        if err != nil {
            Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
            bc.abort(err)
            return
        }

        bc.acks.Add(len(bc.subscriptions))
        for child := range bc.subscriptions {
            child.feeder <- response
        }
        bc.acks.Wait()
        bc.handleResponses()
    }
}

func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
    for _, child := range newSubscriptions {
        bc.subscriptions[child] = none{}
        Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
    }

    for child := range bc.subscriptions {
        select {
        case <-child.dying:
            Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
            close(child.trigger)
            delete(bc.subscriptions, child)
        default:
            break
        }
    }
}

func (bc *brokerConsumer) handleResponses() {
    // handles the response codes left for us by our subscriptions, and abandons ones that have been closed
    for child := range bc.subscriptions {
        result := child.responseResult
        child.responseResult = nil

        switch result {
        case nil:
            break
        case errTimedOut:
            Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
                bc.broker.ID(), child.topic, child.partition)
            delete(bc.subscriptions, child)
        case ErrOffsetOutOfRange:
            // there's no point in retrying this; it will just fail the same way again,
            // so shut it down and force the user to choose what to do
            child.sendError(result)
            Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
            close(child.trigger)
            delete(bc.subscriptions, child)
        case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
            // not an error, but does need redispatching
            Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
                bc.broker.ID(), child.topic, child.partition, result)
            child.trigger <- none{}
            delete(bc.subscriptions, child)
        default:
            // dunno, tell the user and try redispatching
            child.sendError(result)
            Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
                bc.broker.ID(), child.topic, child.partition, result)
            child.trigger <- none{}
            delete(bc.subscriptions, child)
        }
    }
}

func (bc *brokerConsumer) abort(err error) {
    bc.consumer.abandonBrokerConsumer(bc)
    _ = bc.broker.Close() // we don't care about the error this might return, we already have one

    for child := range bc.subscriptions {
        child.sendError(err)
        child.trigger <- none{}
    }

    for newSubscriptions := range bc.newSubscriptions {
        if len(newSubscriptions) == 0 {
            <-bc.wait
            continue
        }
        for _, child := range newSubscriptions {
            child.sendError(err)
            child.trigger <- none{}
        }
    }
}

func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
    request := &FetchRequest{
        MinBytes:    bc.consumer.conf.Consumer.Fetch.Min,
        MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
    }
    if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
        request.Version = 2
    }

    for child := range bc.subscriptions {
        request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
    }

    return bc.broker.Fetch(request)
}
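For orientation (not part of the vendored diff): a minimal sketch of the Consumer API documented above, consuming one partition with a for/range loop over Messages. The broker address and topic are placeholders.

package main

import (
    "log"

    "github.com/Shopify/sarama"
)

func main() {
    consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, sarama.NewConfig())
    if err != nil {
        log.Fatalln("new consumer:", err)
    }
    defer consumer.Close()

    // OffsetNewest corresponds to chooseStartingOffset's "newest" branch above.
    pc, err := consumer.ConsumePartition("my-topic", 0, sarama.OffsetNewest)
    if err != nil {
        log.Fatalln("consume partition:", err)
    }
    defer pc.Close()

    for msg := range pc.Messages() {
        log.Printf("offset %d: %s", msg.Offset, msg.Value)
    }
}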
94  vendor/github.com/Shopify/sarama/consumer_group_members.go  generated  vendored  Normal file
@@ -0,0 +1,94 @@
package sarama

type ConsumerGroupMemberMetadata struct {
	Version  int16
	Topics   []string
	UserData []byte
}

func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
	pe.putInt16(m.Version)

	if err := pe.putStringArray(m.Topics); err != nil {
		return err
	}

	if err := pe.putBytes(m.UserData); err != nil {
		return err
	}

	return nil
}

func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
	if m.Version, err = pd.getInt16(); err != nil {
		return
	}

	if m.Topics, err = pd.getStringArray(); err != nil {
		return
	}

	if m.UserData, err = pd.getBytes(); err != nil {
		return
	}

	return nil
}

type ConsumerGroupMemberAssignment struct {
	Version  int16
	Topics   map[string][]int32
	UserData []byte
}

func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
	pe.putInt16(m.Version)

	if err := pe.putArrayLength(len(m.Topics)); err != nil {
		return err
	}

	for topic, partitions := range m.Topics {
		if err := pe.putString(topic); err != nil {
			return err
		}
		if err := pe.putInt32Array(partitions); err != nil {
			return err
		}
	}

	if err := pe.putBytes(m.UserData); err != nil {
		return err
	}

	return nil
}

func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
	if m.Version, err = pd.getInt16(); err != nil {
		return
	}

	var topicLen int
	if topicLen, err = pd.getArrayLength(); err != nil {
		return
	}

	m.Topics = make(map[string][]int32, topicLen)
	for i := 0; i < topicLen; i++ {
		var topic string
		if topic, err = pd.getString(); err != nil {
			return
		}
		if m.Topics[topic], err = pd.getInt32Array(); err != nil {
			return
		}
	}

	if m.UserData, err = pd.getBytes(); err != nil {
		return
	}

	return nil
}
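The encode/decode helpers these methods rely on are unexported, so a round trip can only be exercised from inside the package. Below is a minimal sketch of a hypothetical in-package test, not part of this commit; it assumes the encode/decode helpers defined in encoder_decoder.go further down this diff, and passes a nil metrics registry to skip metric recording.

package sarama

import (
	"reflect"
	"testing"
)

// TestMemberMetadataRoundTrip (hypothetical) encodes a ConsumerGroupMemberMetadata
// with the package's encode helper and decodes it back, expecting an exact match.
func TestMemberMetadataRoundTrip(t *testing.T) {
	in := &ConsumerGroupMemberMetadata{
		Version:  0,
		Topics:   []string{"events", "logs"},
		UserData: []byte("opaque"),
	}

	bin, err := encode(in, nil) // nil metrics.Registry: no metrics recorded
	if err != nil {
		t.Fatal(err)
	}

	out := new(ConsumerGroupMemberMetadata)
	if err := decode(bin, out); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(in, out) {
		t.Fatalf("round trip mismatch: %v != %v", in, out)
	}
}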
26  vendor/github.com/Shopify/sarama/consumer_metadata_request.go  generated  vendored  Normal file
@@ -0,0 +1,26 @@
package sarama

type ConsumerMetadataRequest struct {
	ConsumerGroup string
}

func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
	return pe.putString(r.ConsumerGroup)
}

func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
	r.ConsumerGroup, err = pd.getString()
	return err
}

func (r *ConsumerMetadataRequest) key() int16 {
	return 10
}

func (r *ConsumerMetadataRequest) version() int16 {
	return 0
}

func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
	return V0_8_2_0
}
85  vendor/github.com/Shopify/sarama/consumer_metadata_response.go  generated  vendored  Normal file
@@ -0,0 +1,85 @@
package sarama

import (
	"net"
	"strconv"
)

type ConsumerMetadataResponse struct {
	Err             KError
	Coordinator     *Broker
	CoordinatorID   int32  // deprecated: use Coordinator.ID()
	CoordinatorHost string // deprecated: use Coordinator.Addr()
	CoordinatorPort int32  // deprecated: use Coordinator.Addr()
}

func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
	tmp, err := pd.getInt16()
	if err != nil {
		return err
	}
	r.Err = KError(tmp)

	coordinator := new(Broker)
	if err := coordinator.decode(pd); err != nil {
		return err
	}
	if coordinator.addr == ":0" {
		return nil
	}
	r.Coordinator = coordinator

	// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
	// backwards compatibility
	host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
	if err != nil {
		return err
	}
	port, err := strconv.ParseInt(portstr, 10, 32)
	if err != nil {
		return err
	}
	r.CoordinatorID = r.Coordinator.ID()
	r.CoordinatorHost = host
	r.CoordinatorPort = int32(port)

	return nil
}

func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
	pe.putInt16(int16(r.Err))
	if r.Coordinator != nil {
		host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
		if err != nil {
			return err
		}
		port, err := strconv.ParseInt(portstr, 10, 32)
		if err != nil {
			return err
		}
		pe.putInt32(r.Coordinator.ID())
		if err := pe.putString(host); err != nil {
			return err
		}
		pe.putInt32(int32(port))
		return nil
	}
	pe.putInt32(r.CoordinatorID)
	if err := pe.putString(r.CoordinatorHost); err != nil {
		return err
	}
	pe.putInt32(r.CoordinatorPort)
	return nil
}

func (r *ConsumerMetadataResponse) key() int16 {
	return 10
}

func (r *ConsumerMetadataResponse) version() int16 {
	return 0
}

func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
	return V0_8_2_0
}
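The deprecated CoordinatorHost/CoordinatorPort fields above are derived from the broker address with net.SplitHostPort and strconv.ParseInt. A standalone sketch of that derivation (the hostname is a placeholder):

package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	// Same split the decoder performs on Coordinator.Addr().
	host, portstr, err := net.SplitHostPort("kafka-1.example.com:9092")
	if err != nil {
		panic(err)
	}
	// ParseInt with bitSize 32 guarantees the value fits in an int32.
	port, err := strconv.ParseInt(portstr, 10, 32)
	if err != nil {
		panic(err)
	}
	fmt.Println(host, int32(port)) // kafka-1.example.com 9092
}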
36  vendor/github.com/Shopify/sarama/crc32_field.go  generated  vendored  Normal file
@@ -0,0 +1,36 @@
package sarama

import (
	"encoding/binary"

	"github.com/klauspost/crc32"
)

// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
type crc32Field struct {
	startOffset int
}

func (c *crc32Field) saveOffset(in int) {
	c.startOffset = in
}

func (c *crc32Field) reserveLength() int {
	return 4
}

func (c *crc32Field) run(curOffset int, buf []byte) error {
	crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
	binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
	return nil
}

func (c *crc32Field) check(curOffset int, buf []byte) error {
	crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])

	if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) {
		return PacketDecodingError{"CRC didn't match"}
	}

	return nil
}
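The wire layout crc32Field manages is four reserved bytes followed by the checksummed payload. A standalone sketch of the same arithmetic, using the standard library's hash/crc32 instead of the API-compatible klauspost fork imported above:

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	// Layout used by crc32Field: 4 reserved bytes, then the payload.
	buf := make([]byte, 4)
	buf = append(buf, []byte("payload bytes")...)

	// run(): checksum everything after the reserved field and write it
	// big-endian into the reserved slot.
	crc := crc32.ChecksumIEEE(buf[4:])
	binary.BigEndian.PutUint32(buf[0:], crc)

	// check(): recompute and compare, as the decoder does.
	fmt.Println(crc == binary.BigEndian.Uint32(buf[0:])) // true
}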
30  vendor/github.com/Shopify/sarama/describe_groups_request.go  generated  vendored  Normal file
@@ -0,0 +1,30 @@
package sarama

type DescribeGroupsRequest struct {
	Groups []string
}

func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
	return pe.putStringArray(r.Groups)
}

func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
	r.Groups, err = pd.getStringArray()
	return
}

func (r *DescribeGroupsRequest) key() int16 {
	return 15
}

func (r *DescribeGroupsRequest) version() int16 {
	return 0
}

func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
	return V0_9_0_0
}

func (r *DescribeGroupsRequest) AddGroup(group string) {
	r.Groups = append(r.Groups, group)
}
186  vendor/github.com/Shopify/sarama/describe_groups_response.go  generated  vendored  Normal file
@@ -0,0 +1,186 @@
package sarama

type DescribeGroupsResponse struct {
	Groups []*GroupDescription
}

func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
	if err := pe.putArrayLength(len(r.Groups)); err != nil {
		return err
	}

	for _, groupDescription := range r.Groups {
		if err := groupDescription.encode(pe); err != nil {
			return err
		}
	}

	return nil
}

func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
	n, err := pd.getArrayLength()
	if err != nil {
		return err
	}

	r.Groups = make([]*GroupDescription, n)
	for i := 0; i < n; i++ {
		r.Groups[i] = new(GroupDescription)
		if err := r.Groups[i].decode(pd); err != nil {
			return err
		}
	}

	return nil
}

func (r *DescribeGroupsResponse) key() int16 {
	return 15
}

func (r *DescribeGroupsResponse) version() int16 {
	return 0
}

func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
	return V0_9_0_0
}

type GroupDescription struct {
	Err          KError
	GroupId      string
	State        string
	ProtocolType string
	Protocol     string
	Members      map[string]*GroupMemberDescription
}

func (gd *GroupDescription) encode(pe packetEncoder) error {
	pe.putInt16(int16(gd.Err))

	if err := pe.putString(gd.GroupId); err != nil {
		return err
	}
	if err := pe.putString(gd.State); err != nil {
		return err
	}
	if err := pe.putString(gd.ProtocolType); err != nil {
		return err
	}
	if err := pe.putString(gd.Protocol); err != nil {
		return err
	}

	if err := pe.putArrayLength(len(gd.Members)); err != nil {
		return err
	}

	for memberId, groupMemberDescription := range gd.Members {
		if err := pe.putString(memberId); err != nil {
			return err
		}
		if err := groupMemberDescription.encode(pe); err != nil {
			return err
		}
	}

	return nil
}

func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
	if kerr, err := pd.getInt16(); err != nil {
		return err
	} else {
		gd.Err = KError(kerr)
	}

	if gd.GroupId, err = pd.getString(); err != nil {
		return
	}
	if gd.State, err = pd.getString(); err != nil {
		return
	}
	if gd.ProtocolType, err = pd.getString(); err != nil {
		return
	}
	if gd.Protocol, err = pd.getString(); err != nil {
		return
	}

	n, err := pd.getArrayLength()
	if err != nil {
		return err
	}
	if n == 0 {
		return nil
	}

	gd.Members = make(map[string]*GroupMemberDescription)
	for i := 0; i < n; i++ {
		memberId, err := pd.getString()
		if err != nil {
			return err
		}

		gd.Members[memberId] = new(GroupMemberDescription)
		if err := gd.Members[memberId].decode(pd); err != nil {
			return err
		}
	}

	return nil
}

type GroupMemberDescription struct {
	ClientId         string
	ClientHost       string
	MemberMetadata   []byte
	MemberAssignment []byte
}

func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
	if err := pe.putString(gmd.ClientId); err != nil {
		return err
	}
	if err := pe.putString(gmd.ClientHost); err != nil {
		return err
	}
	if err := pe.putBytes(gmd.MemberMetadata); err != nil {
		return err
	}
	if err := pe.putBytes(gmd.MemberAssignment); err != nil {
		return err
	}

	return nil
}

func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
	if gmd.ClientId, err = pd.getString(); err != nil {
		return
	}
	if gmd.ClientHost, err = pd.getString(); err != nil {
		return
	}
	if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
		return
	}
	if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
		return
	}

	return nil
}

func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
	assignment := new(ConsumerGroupMemberAssignment)
	err := decode(gmd.MemberAssignment, assignment)
	return assignment, err
}

func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
	metadata := new(ConsumerGroupMemberMetadata)
	err := decode(gmd.MemberMetadata, metadata)
	return metadata, err
}
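Callers typically unpack the opaque member blobs with the exported getters above. A short sketch under the assumption that a *GroupDescription was obtained from a DescribeGroups round trip elsewhere; the wiring to a real broker is elided and the names are placeholders:

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// printMembers walks a decoded group description and unpacks each member's
// metadata and assignment blobs via the exported getters.
func printMembers(gd *sarama.GroupDescription) error {
	for id, member := range gd.Members {
		meta, err := member.GetMemberMetadata() // decodes the MemberMetadata blob
		if err != nil {
			return err
		}
		assign, err := member.GetMemberAssignment() // decodes the MemberAssignment blob
		if err != nil {
			return err
		}
		fmt.Println(id, meta.Topics, assign.Topics)
	}
	return nil
}

func main() {} // obtaining a DescribeGroupsResponse requires a live broker, elided here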
13  vendor/github.com/Shopify/sarama/dev.yml  generated  vendored  Normal file
@@ -0,0 +1,13 @@
name: sarama

up:
  - go: 1.7.3

commands:
  test:
    run: make test
    desc: 'run unit tests'

packages:
  - git@github.com:Shopify/dev-shopify.git
89  vendor/github.com/Shopify/sarama/encoder_decoder.go  generated  vendored  Normal file
@@ -0,0 +1,89 @@
package sarama

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

// Encoder is the interface that wraps the basic Encode method.
// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules.
type encoder interface {
	encode(pe packetEncoder) error
}

// Encode takes an Encoder and turns it into bytes while potentially recording metrics.
func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
	if e == nil {
		return nil, nil
	}

	var prepEnc prepEncoder
	var realEnc realEncoder

	err := e.encode(&prepEnc)
	if err != nil {
		return nil, err
	}

	if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
		return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
	}

	realEnc.raw = make([]byte, prepEnc.length)
	realEnc.registry = metricRegistry
	err = e.encode(&realEnc)
	if err != nil {
		return nil, err
	}

	return realEnc.raw, nil
}

// Decoder is the interface that wraps the basic Decode method.
// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
type decoder interface {
	decode(pd packetDecoder) error
}

type versionedDecoder interface {
	decode(pd packetDecoder, version int16) error
}

// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes,
// interpreted using Kafka's encoding rules.
func decode(buf []byte, in decoder) error {
	if buf == nil {
		return nil
	}

	helper := realDecoder{raw: buf}
	err := in.decode(&helper)
	if err != nil {
		return err
	}

	if helper.off != len(buf) {
		return PacketDecodingError{"invalid length"}
	}

	return nil
}

func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
	if buf == nil {
		return nil
	}

	helper := realDecoder{raw: buf}
	err := in.decode(&helper, version)
	if err != nil {
		return err
	}

	if helper.off != len(buf) {
		return PacketDecodingError{"invalid length"}
	}

	return nil
}
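The encode helper above makes two passes: a prepEncoder that only counts bytes, then a realEncoder that writes into an exactly-sized buffer, avoiding reallocation. A standalone, illustrative-only sketch of the same two-pass idea for a single length-prefixed string (not sarama API):

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeString sizes the output first (as prepEncoder would), then writes
// into the pre-sized buffer (as realEncoder does).
func encodeString(s string) []byte {
	// Pass 1: compute the total size, 2-byte length prefix plus payload.
	buf := make([]byte, 2+len(s))
	// Pass 2: fill in the bytes.
	binary.BigEndian.PutUint16(buf[0:], uint16(len(s)))
	copy(buf[2:], s)
	return buf
}

func main() {
	fmt.Printf("% x\n", encodeString("hi")) // 00 02 68 69
}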
197  vendor/github.com/Shopify/sarama/errors.go  generated  vendored  Normal file
@@ -0,0 +1,197 @@
package sarama

import (
	"errors"
	"fmt"
)

// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
// or otherwise failed to respond.
var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")

// ErrClosedClient is the error returned when a method is called on a client that has been closed.
var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")

// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
// not contain the expected information.
var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")

// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
// (meaning one outside of the range [0...numPartitions-1]).
var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")

// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")

// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
var ErrNotConnected = errors.New("kafka: broker not connected")

// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
// of the message set.
var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")

// ErrShuttingDown is returned when a producer receives a message during shutdown.
var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")

// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")

// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
type PacketEncodingError struct {
	Info string
}

func (err PacketEncodingError) Error() string {
	return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
}

// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
// This can be a bad CRC or length field, or any other invalid value.
type PacketDecodingError struct {
	Info string
}

func (err PacketDecodingError) Error() string {
	return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
}

// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
// when the specified configuration is invalid.
type ConfigurationError string

func (err ConfigurationError) Error() string {
	return "kafka: invalid configuration (" + string(err) + ")"
}

// KError is the type of error that can be returned directly by the Kafka broker.
// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
type KError int16

// Numeric error codes returned by the Kafka server.
const (
	ErrNoError                         KError = 0
	ErrUnknown                         KError = -1
	ErrOffsetOutOfRange                KError = 1
	ErrInvalidMessage                  KError = 2
	ErrUnknownTopicOrPartition         KError = 3
	ErrInvalidMessageSize              KError = 4
	ErrLeaderNotAvailable              KError = 5
	ErrNotLeaderForPartition           KError = 6
	ErrRequestTimedOut                 KError = 7
	ErrBrokerNotAvailable              KError = 8
	ErrReplicaNotAvailable             KError = 9
	ErrMessageSizeTooLarge             KError = 10
	ErrStaleControllerEpochCode        KError = 11
	ErrOffsetMetadataTooLarge          KError = 12
	ErrNetworkException                KError = 13
	ErrOffsetsLoadInProgress           KError = 14
	ErrConsumerCoordinatorNotAvailable KError = 15
	ErrNotCoordinatorForConsumer       KError = 16
	ErrInvalidTopic                    KError = 17
	ErrMessageSetSizeTooLarge          KError = 18
	ErrNotEnoughReplicas               KError = 19
	ErrNotEnoughReplicasAfterAppend    KError = 20
	ErrInvalidRequiredAcks             KError = 21
	ErrIllegalGeneration               KError = 22
	ErrInconsistentGroupProtocol       KError = 23
	ErrInvalidGroupId                  KError = 24
	ErrUnknownMemberId                 KError = 25
	ErrInvalidSessionTimeout           KError = 26
	ErrRebalanceInProgress             KError = 27
	ErrInvalidCommitOffsetSize         KError = 28
	ErrTopicAuthorizationFailed        KError = 29
	ErrGroupAuthorizationFailed        KError = 30
	ErrClusterAuthorizationFailed      KError = 31
	ErrInvalidTimestamp                KError = 32
	ErrUnsupportedSASLMechanism        KError = 33
	ErrIllegalSASLState                KError = 34
	ErrUnsupportedVersion              KError = 35
	ErrUnsupportedForMessageFormat     KError = 43
)

func (err KError) Error() string {
	// Error messages stolen/adapted from
	// https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
	switch err {
	case ErrNoError:
		return "kafka server: Not an error, why are you printing me?"
	case ErrUnknown:
		return "kafka server: Unexpected (unknown?) server error."
	case ErrOffsetOutOfRange:
		return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
	case ErrInvalidMessage:
		return "kafka server: Message contents does not match its CRC."
	case ErrUnknownTopicOrPartition:
		return "kafka server: Request was for a topic or partition that does not exist on this broker."
	case ErrInvalidMessageSize:
		return "kafka server: The message has a negative size."
	case ErrLeaderNotAvailable:
		return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
	case ErrNotLeaderForPartition:
		return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
	case ErrRequestTimedOut:
		return "kafka server: Request exceeded the user-specified time limit in the request."
	case ErrBrokerNotAvailable:
		return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
	case ErrReplicaNotAvailable:
		return "kafka server: Replica information not available, one or more brokers are down."
	case ErrMessageSizeTooLarge:
		return "kafka server: Message was too large, server rejected it to avoid allocation error."
	case ErrStaleControllerEpochCode:
		return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
	case ErrOffsetMetadataTooLarge:
		return "kafka server: Specified a string larger than the configured maximum for offset metadata."
	case ErrNetworkException:
		return "kafka server: The server disconnected before a response was received."
	case ErrOffsetsLoadInProgress:
		return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
	case ErrConsumerCoordinatorNotAvailable:
		return "kafka server: Offset's topic has not yet been created."
	case ErrNotCoordinatorForConsumer:
		return "kafka server: Request was for a consumer group that is not coordinated by this broker."
	case ErrInvalidTopic:
		return "kafka server: The request attempted to perform an operation on an invalid topic."
	case ErrMessageSetSizeTooLarge:
		return "kafka server: The request included message batch larger than the configured segment size on the server."
	case ErrNotEnoughReplicas:
		return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
	case ErrNotEnoughReplicasAfterAppend:
		return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
	case ErrInvalidRequiredAcks:
		return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
	case ErrIllegalGeneration:
		return "kafka server: The provided generation id is not the current generation."
	case ErrInconsistentGroupProtocol:
		return "kafka server: The provider group protocol type is incompatible with the other members."
	case ErrInvalidGroupId:
		return "kafka server: The provided group id was empty."
	case ErrUnknownMemberId:
		return "kafka server: The provided member is not known in the current generation."
	case ErrInvalidSessionTimeout:
		return "kafka server: The provided session timeout is outside the allowed range."
	case ErrRebalanceInProgress:
		return "kafka server: A rebalance for the group is in progress. Please re-join the group."
	case ErrInvalidCommitOffsetSize:
		return "kafka server: The provided commit metadata was too large."
	case ErrTopicAuthorizationFailed:
		return "kafka server: The client is not authorized to access this topic."
	case ErrGroupAuthorizationFailed:
		return "kafka server: The client is not authorized to access this group."
	case ErrClusterAuthorizationFailed:
		return "kafka server: The client is not authorized to send this request type."
	case ErrInvalidTimestamp:
		return "kafka server: The timestamp of the message is out of acceptable range."
	case ErrUnsupportedSASLMechanism:
		return "kafka server: The broker does not support the requested SASL mechanism."
	case ErrIllegalSASLState:
		return "kafka server: Request is not valid given the current SASL state."
	case ErrUnsupportedVersion:
		return "kafka server: The version of API is not supported."
	case ErrUnsupportedForMessageFormat:
		return "kafka server: The requested operation is not supported by the message format version."
	}

	return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
}
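Because KError is an integer type with a value-receiver Error method, broker error codes compare directly against the exported constants even through the error interface. A small sketch using only names defined above:

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// A KError stored in an error variable still compares equal to its constant.
	var err error = sarama.ErrNotLeaderForPartition
	if err == sarama.ErrNotLeaderForPartition {
		fmt.Println("stale metadata:", err) // prints the broker-side description
	}

	// Client-side sentinels are plain errors.New values, compared the same way.
	fmt.Println(sarama.ErrOutOfBrokers)
}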
136  vendor/github.com/Shopify/sarama/fetch_request.go  generated  vendored  Normal file
@@ -0,0 +1,136 @@
package sarama

type fetchRequestBlock struct {
	fetchOffset int64
	maxBytes    int32
}

func (b *fetchRequestBlock) encode(pe packetEncoder) error {
	pe.putInt64(b.fetchOffset)
	pe.putInt32(b.maxBytes)
	return nil
}

func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
	if b.fetchOffset, err = pd.getInt64(); err != nil {
		return err
	}
	if b.maxBytes, err = pd.getInt32(); err != nil {
		return err
	}
	return nil
}

type FetchRequest struct {
	MaxWaitTime int32
	MinBytes    int32
	Version     int16
	blocks      map[string]map[int32]*fetchRequestBlock
}

func (r *FetchRequest) encode(pe packetEncoder) (err error) {
	pe.putInt32(-1) // replica ID is always -1 for clients
	pe.putInt32(r.MaxWaitTime)
	pe.putInt32(r.MinBytes)
	err = pe.putArrayLength(len(r.blocks))
	if err != nil {
		return err
	}
	for topic, blocks := range r.blocks {
		err = pe.putString(topic)
		if err != nil {
			return err
		}
		err = pe.putArrayLength(len(blocks))
		if err != nil {
			return err
		}
		for partition, block := range blocks {
			pe.putInt32(partition)
			err = block.encode(pe)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
	r.Version = version
	if _, err = pd.getInt32(); err != nil {
		return err
	}
	if r.MaxWaitTime, err = pd.getInt32(); err != nil {
		return err
	}
	if r.MinBytes, err = pd.getInt32(); err != nil {
		return err
	}
	topicCount, err := pd.getArrayLength()
	if err != nil {
		return err
	}
	if topicCount == 0 {
		return nil
	}
	r.blocks = make(map[string]map[int32]*fetchRequestBlock)
	for i := 0; i < topicCount; i++ {
		topic, err := pd.getString()
		if err != nil {
			return err
		}
		partitionCount, err := pd.getArrayLength()
		if err != nil {
			return err
		}
		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
		for j := 0; j < partitionCount; j++ {
			partition, err := pd.getInt32()
			if err != nil {
				return err
			}
			fetchBlock := &fetchRequestBlock{}
			if err = fetchBlock.decode(pd); err != nil {
				return err // was "return nil", which silently swallowed the decode error
			}
			r.blocks[topic][partition] = fetchBlock
		}
	}
	return nil
}

func (r *FetchRequest) key() int16 {
	return 1
}

func (r *FetchRequest) version() int16 {
	return r.Version
}

func (r *FetchRequest) requiredVersion() KafkaVersion {
	switch r.Version {
	case 1:
		return V0_9_0_0
	case 2:
		return V0_10_0_0
	default:
		return minVersion
	}
}

func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
	if r.blocks == nil {
		r.blocks = make(map[string]map[int32]*fetchRequestBlock)
	}

	if r.blocks[topic] == nil {
		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
	}

	tmp := new(fetchRequestBlock)
	tmp.maxBytes = maxBytes
	tmp.fetchOffset = fetchOffset

	r.blocks[topic][partitionID] = tmp
}
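This is the request that fetchNewMessages in consumer.go assembles: config-driven MinBytes/MaxWaitTime plus one AddBlock per subscribed partition, with Version 2 for 0.10+ brokers. A hand-built sketch using only the exported surface above (topic name and sizes are placeholders):

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.FetchRequest{
		MaxWaitTime: 250, // milliseconds, mirrors Consumer.MaxWaitTime
		MinBytes:    1,   // mirrors Consumer.Fetch.Min
		Version:     2,   // requires a 0.10+ broker per requiredVersion()
	}
	// One block per (topic, partition): start offset and per-partition byte cap.
	req.AddBlock("events", 0, 0, 32768)

	fmt.Printf("%+v\n", req)
}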
210  vendor/github.com/Shopify/sarama/fetch_response.go  generated  vendored  Normal file
@@ -0,0 +1,210 @@
package sarama

import "time"

type FetchResponseBlock struct {
	Err                 KError
	HighWaterMarkOffset int64
	MsgSet              MessageSet
}

func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) {
	tmp, err := pd.getInt16()
	if err != nil {
		return err
	}
	b.Err = KError(tmp)

	b.HighWaterMarkOffset, err = pd.getInt64()
	if err != nil {
		return err
	}

	msgSetSize, err := pd.getInt32()
	if err != nil {
		return err
	}

	msgSetDecoder, err := pd.getSubset(int(msgSetSize))
	if err != nil {
		return err
	}
	err = (&b.MsgSet).decode(msgSetDecoder)

	return err
}

func (b *FetchResponseBlock) encode(pe packetEncoder) (err error) {
	pe.putInt16(int16(b.Err))

	pe.putInt64(b.HighWaterMarkOffset)

	pe.push(&lengthField{})
	err = b.MsgSet.encode(pe)
	if err != nil {
		return err
	}
	return pe.pop()
}

type FetchResponse struct {
	Blocks       map[string]map[int32]*FetchResponseBlock
	ThrottleTime time.Duration
	Version      int16 // v1 requires 0.9+, v2 requires 0.10+
}

func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
	r.Version = version

	if r.Version >= 1 {
		throttle, err := pd.getInt32()
		if err != nil {
			return err
		}
		r.ThrottleTime = time.Duration(throttle) * time.Millisecond
	}

	numTopics, err := pd.getArrayLength()
	if err != nil {
		return err
	}

	r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
	for i := 0; i < numTopics; i++ {
		name, err := pd.getString()
		if err != nil {
			return err
		}

		numBlocks, err := pd.getArrayLength()
		if err != nil {
			return err
		}

		r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)

		for j := 0; j < numBlocks; j++ {
			id, err := pd.getInt32()
			if err != nil {
				return err
			}

			block := new(FetchResponseBlock)
			err = block.decode(pd)
			if err != nil {
				return err
			}
			r.Blocks[name][id] = block
		}
	}

	return nil
}

func (r *FetchResponse) encode(pe packetEncoder) (err error) {
	if r.Version >= 1 {
		pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
	}

	err = pe.putArrayLength(len(r.Blocks))
	if err != nil {
		return err
	}

	for topic, partitions := range r.Blocks {
		err = pe.putString(topic)
		if err != nil {
			return err
		}

		err = pe.putArrayLength(len(partitions))
		if err != nil {
			return err
		}

		for id, block := range partitions {
			pe.putInt32(id)
			err = block.encode(pe)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

func (r *FetchResponse) key() int16 {
	return 1
}

func (r *FetchResponse) version() int16 {
	return r.Version
}

func (r *FetchResponse) requiredVersion() KafkaVersion {
	switch r.Version {
	case 1:
		return V0_9_0_0
	case 2:
		return V0_10_0_0
	default:
		return minVersion
	}
}

func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
	if r.Blocks == nil {
		return nil
	}

	if r.Blocks[topic] == nil {
		return nil
	}

	return r.Blocks[topic][partition]
}

func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
	if r.Blocks == nil {
		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
	}
	partitions, ok := r.Blocks[topic]
	if !ok {
		partitions = make(map[int32]*FetchResponseBlock)
		r.Blocks[topic] = partitions
	}
	frb, ok := partitions[partition]
	if !ok {
		frb = new(FetchResponseBlock)
		partitions[partition] = frb
	}
	frb.Err = err
}

func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
	if r.Blocks == nil {
		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
	}
	partitions, ok := r.Blocks[topic]
	if !ok {
		partitions = make(map[int32]*FetchResponseBlock)
		r.Blocks[topic] = partitions
	}
	frb, ok := partitions[partition]
	if !ok {
		frb = new(FetchResponseBlock)
		partitions[partition] = frb
	}
	var kb []byte
	var vb []byte
	if key != nil {
		kb, _ = key.Encode()
	}
	if value != nil {
		vb, _ = value.Encode()
	}
	msg := &Message{Key: kb, Value: vb}
	msgBlock := &MessageBlock{Msg: msg, Offset: offset}
	frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock)
}
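AddMessage and AddError exist mainly so tests and mock brokers can fabricate responses, while GetBlock performs the nil-safe two-level map lookup. A short sketch; sarama.StringEncoder is the package's exported string Encoder, defined elsewhere in the package and not shown in this diff:

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	resp := new(sarama.FetchResponse)
	resp.AddMessage("events", 0, nil, sarama.StringEncoder("hello"), 42)
	resp.AddError("events", 1, sarama.ErrOffsetOutOfRange)

	// GetBlock returns nil rather than panicking for unknown topic/partition.
	block := resp.GetBlock("events", 0)
	fmt.Println(len(block.MsgSet.Messages), block.Err) // 1 messages, ErrNoError
}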
47  vendor/github.com/Shopify/sarama/heartbeat_request.go  generated  vendored  Normal file
@@ -0,0 +1,47 @@
package sarama

type HeartbeatRequest struct {
	GroupId      string
	GenerationId int32
	MemberId     string
}

func (r *HeartbeatRequest) encode(pe packetEncoder) error {
	if err := pe.putString(r.GroupId); err != nil {
		return err
	}

	pe.putInt32(r.GenerationId)

	if err := pe.putString(r.MemberId); err != nil {
		return err
	}

	return nil
}

func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
	if r.GroupId, err = pd.getString(); err != nil {
		return
	}
	if r.GenerationId, err = pd.getInt32(); err != nil {
		return
	}
	if r.MemberId, err = pd.getString(); err != nil {
		return
	}

	return nil
}

func (r *HeartbeatRequest) key() int16 {
	return 12
}

func (r *HeartbeatRequest) version() int16 {
	return 0
}

func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
	return V0_9_0_0
}
32  vendor/github.com/Shopify/sarama/heartbeat_response.go  generated  vendored  Normal file
@@ -0,0 +1,32 @@
package sarama

type HeartbeatResponse struct {
	Err KError
}

func (r *HeartbeatResponse) encode(pe packetEncoder) error {
	pe.putInt16(int16(r.Err))
	return nil
}

func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
	if kerr, err := pd.getInt16(); err != nil {
		return err
	} else {
		r.Err = KError(kerr)
	}

	return nil
}

func (r *HeartbeatResponse) key() int16 {
	return 12
}

func (r *HeartbeatResponse) version() int16 {
	return 0
}

func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
	return V0_9_0_0
}
108  vendor/github.com/Shopify/sarama/join_group_request.go  generated  vendored  Normal file
@@ -0,0 +1,108 @@
package sarama

type JoinGroupRequest struct {
	GroupId        string
	SessionTimeout int32
	MemberId       string
	ProtocolType   string
	GroupProtocols map[string][]byte
}

func (r *JoinGroupRequest) encode(pe packetEncoder) error {
	if err := pe.putString(r.GroupId); err != nil {
		return err
	}
	pe.putInt32(r.SessionTimeout)
	if err := pe.putString(r.MemberId); err != nil {
		return err
	}
	if err := pe.putString(r.ProtocolType); err != nil {
		return err
	}

	if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
		return err
	}
	for name, metadata := range r.GroupProtocols {
		if err := pe.putString(name); err != nil {
			return err
		}
		if err := pe.putBytes(metadata); err != nil {
			return err
		}
	}

	return nil
}

func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
	if r.GroupId, err = pd.getString(); err != nil {
		return
	}

	if r.SessionTimeout, err = pd.getInt32(); err != nil {
		return
	}

	if r.MemberId, err = pd.getString(); err != nil {
		return
	}

	if r.ProtocolType, err = pd.getString(); err != nil {
		return
	}

	n, err := pd.getArrayLength()
	if err != nil {
		return err
	}
	if n == 0 {
		return nil
	}

	r.GroupProtocols = make(map[string][]byte)
	for i := 0; i < n; i++ {
		name, err := pd.getString()
		if err != nil {
			return err
		}
		metadata, err := pd.getBytes()
		if err != nil {
			return err
		}

		r.GroupProtocols[name] = metadata
	}

	return nil
}

func (r *JoinGroupRequest) key() int16 {
	return 11
}

func (r *JoinGroupRequest) version() int16 {
	return 0
}

func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
	return V0_9_0_0
}

func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
	if r.GroupProtocols == nil {
		r.GroupProtocols = make(map[string][]byte)
	}

	r.GroupProtocols[name] = metadata
}

func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
	bin, err := encode(metadata, nil)
	if err != nil {
		return err
	}

	r.AddGroupProtocol(name, bin)
	return nil
}
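AddGroupProtocolMetadata ties this file back to consumer_group_members.go: the subscription travels as an encoded ConsumerGroupMemberMetadata blob keyed by protocol name. A sketch using only the exported surface above; the group, topic, and protocol names are placeholders ("range" is one of Kafka's built-in assignment strategy names):

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.JoinGroupRequest{
		GroupId:        "example-group",
		SessionTimeout: 30000, // milliseconds
		ProtocolType:   "consumer",
	}
	meta := &sarama.ConsumerGroupMemberMetadata{Topics: []string{"events"}}

	// Encodes meta and stores the blob under the protocol name.
	if err := req.AddGroupProtocolMetadata("range", meta); err != nil {
		panic(err)
	}
	fmt.Println(len(req.GroupProtocols["range"]) > 0) // true
}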
114  vendor/github.com/Shopify/sarama/join_group_response.go  generated  vendored  Normal file
@@ -0,0 +1,114 @@
package sarama

type JoinGroupResponse struct {
	Err           KError
	GenerationId  int32
	GroupProtocol string
	LeaderId      string
	MemberId      string
	Members       map[string][]byte
}

func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
	members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
	for id, bin := range r.Members {
		meta := new(ConsumerGroupMemberMetadata)
		if err := decode(bin, meta); err != nil {
			return nil, err
		}
		members[id] = *meta
	}
	return members, nil
}

func (r *JoinGroupResponse) encode(pe packetEncoder) error {
	pe.putInt16(int16(r.Err))
	pe.putInt32(r.GenerationId)

	if err := pe.putString(r.GroupProtocol); err != nil {
		return err
	}
	if err := pe.putString(r.LeaderId); err != nil {
		return err
	}
	if err := pe.putString(r.MemberId); err != nil {
		return err
	}

	if err := pe.putArrayLength(len(r.Members)); err != nil {
		return err
	}

	for memberId, memberMetadata := range r.Members {
		if err := pe.putString(memberId); err != nil {
			return err
		}

		if err := pe.putBytes(memberMetadata); err != nil {
			return err
		}
	}

	return nil
}

func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
	if kerr, err := pd.getInt16(); err != nil {
		return err
	} else {
		r.Err = KError(kerr)
	}

	if r.GenerationId, err = pd.getInt32(); err != nil {
		return
	}

	if r.GroupProtocol, err = pd.getString(); err != nil {
		return
	}

	if r.LeaderId, err = pd.getString(); err != nil {
		return
	}

	if r.MemberId, err = pd.getString(); err != nil {
		return
	}

	n, err := pd.getArrayLength()
	if err != nil {
		return err
	}
	if n == 0 {
		return nil
	}

	r.Members = make(map[string][]byte)
	for i := 0; i < n; i++ {
		memberId, err := pd.getString()
		if err != nil {
			return err
		}

		memberMetadata, err := pd.getBytes()
		if err != nil {
			return err
		}

		r.Members[memberId] = memberMetadata
	}

	return nil
}

func (r *JoinGroupResponse) key() int16 {
	return 11
}

func (r *JoinGroupResponse) version() int16 {
	return 0
}

func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
	return V0_9_0_0
}
40  vendor/github.com/Shopify/sarama/leave_group_request.go  generated  vendored  Normal file
@@ -0,0 +1,40 @@
package sarama

type LeaveGroupRequest struct {
	GroupId  string
	MemberId string
}

func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
	if err := pe.putString(r.GroupId); err != nil {
		return err
	}
	if err := pe.putString(r.MemberId); err != nil {
		return err
	}

	return nil
}

func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
	if r.GroupId, err = pd.getString(); err != nil {
		return
	}
	if r.MemberId, err = pd.getString(); err != nil {
		return
	}

	return nil
}

func (r *LeaveGroupRequest) key() int16 {
	return 13
}

func (r *LeaveGroupRequest) version() int16 {
	return 0
}

func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
	return V0_9_0_0
}
32  vendor/github.com/Shopify/sarama/leave_group_response.go  generated  vendored  Normal file
@@ -0,0 +1,32 @@
package sarama

type LeaveGroupResponse struct {
	Err KError
}

func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
	pe.putInt16(int16(r.Err))
	return nil
}

func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
	if kerr, err := pd.getInt16(); err != nil {
		return err
	} else {
		r.Err = KError(kerr)
	}

	return nil
}

func (r *LeaveGroupResponse) key() int16 {
	return 13
}

func (r *LeaveGroupResponse) version() int16 {
	return 0
}

func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
	return V0_9_0_0
}
29  vendor/github.com/Shopify/sarama/length_field.go  generated  vendored  Normal file
@@ -0,0 +1,29 @@
package sarama

import "encoding/binary"

// lengthField implements the pushEncoder and pushDecoder interfaces for calculating 4-byte lengths.
type lengthField struct {
	startOffset int
}

func (l *lengthField) saveOffset(in int) {
	l.startOffset = in
}

func (l *lengthField) reserveLength() int {
	return 4
}

func (l *lengthField) run(curOffset int, buf []byte) error {
	binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
	return nil
}

func (l *lengthField) check(curOffset int, buf []byte) error {
	if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) {
		return PacketDecodingError{"length field invalid"}
	}

	return nil
}
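The arithmetic in run() is the whole trick: the 4-byte prefix holds the byte count of everything between itself and the current write offset. A standalone sketch of the same computation:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Reserve 4 bytes, then write a 6-byte payload after them.
	startOffset := 0
	buf := make([]byte, 4)
	buf = append(buf, []byte("abcdef")...)
	curOffset := len(buf)

	// run(): length = curOffset - startOffset - 4 (the prefix itself excluded).
	binary.BigEndian.PutUint32(buf[startOffset:], uint32(curOffset-startOffset-4))
	fmt.Println(binary.BigEndian.Uint32(buf)) // 6
}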
24  vendor/github.com/Shopify/sarama/list_groups_request.go  generated  vendored  Normal file
@@ -0,0 +1,24 @@
package sarama

type ListGroupsRequest struct {
}

func (r *ListGroupsRequest) encode(pe packetEncoder) error {
	return nil
}

func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
	return nil
}

func (r *ListGroupsRequest) key() int16 {
	return 16
}

func (r *ListGroupsRequest) version() int16 {
	return 0
}

func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
	return V0_9_0_0
}
68  vendor/github.com/Shopify/sarama/list_groups_response.go  generated  vendored  Normal file
@@ -0,0 +1,68 @@
package sarama

type ListGroupsResponse struct {
	Err    KError
	Groups map[string]string
}

func (r *ListGroupsResponse) encode(pe packetEncoder) error {
	pe.putInt16(int16(r.Err))

	if err := pe.putArrayLength(len(r.Groups)); err != nil {
		return err
	}
	for groupId, protocolType := range r.Groups {
		if err := pe.putString(groupId); err != nil {
			return err
		}
		if err := pe.putString(protocolType); err != nil {
			return err
		}
	}

	return nil
}

func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
	if kerr, err := pd.getInt16(); err != nil {
		return err
	} else {
		r.Err = KError(kerr)
	}

	n, err := pd.getArrayLength()
	if err != nil {
		return err
	}
	if n == 0 {
		return nil
	}

	r.Groups = make(map[string]string)
	for i := 0; i < n; i++ {
		groupId, err := pd.getString()
		if err != nil {
			return err
		}
		protocolType, err := pd.getString()
		if err != nil {
			return err
		}

		r.Groups[groupId] = protocolType
	}

	return nil
}

func (r *ListGroupsResponse) key() int16 {
	return 16
}

func (r *ListGroupsResponse) version() int16 {
	return 0
}

func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
	return V0_9_0_0
}
196  vendor/github.com/Shopify/sarama/message.go  generated  vendored  Normal file
@@ -0,0 +1,196 @@
package sarama

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
	"time"

	"github.com/eapache/go-xerial-snappy"
	"github.com/pierrec/lz4"
)

// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
type CompressionCodec int8

// only the last two bits are really used
const compressionCodecMask int8 = 0x03

const (
	CompressionNone   CompressionCodec = 0
	CompressionGZIP   CompressionCodec = 1
	CompressionSnappy CompressionCodec = 2
	CompressionLZ4    CompressionCodec = 3
)

type Message struct {
	Codec     CompressionCodec // codec used to compress the message contents
	Key       []byte           // the message key, may be nil
	Value     []byte           // the message contents
	Set       *MessageSet      // the message set a message might wrap
	Version   int8             // v1 requires Kafka 0.10
	Timestamp time.Time        // the timestamp of the message (version 1+ only)

	compressedCache []byte
	compressedSize  int // used for computing the compression ratio metrics
}

func (m *Message) encode(pe packetEncoder) error {
	pe.push(&crc32Field{})

	pe.putInt8(m.Version)

	attributes := int8(m.Codec) & compressionCodecMask
	pe.putInt8(attributes)

	if m.Version >= 1 {
		pe.putInt64(m.Timestamp.UnixNano() / int64(time.Millisecond))
	}

	err := pe.putBytes(m.Key)
	if err != nil {
		return err
	}

	var payload []byte

	if m.compressedCache != nil {
		payload = m.compressedCache
		m.compressedCache = nil
	} else if m.Value != nil {
		switch m.Codec {
		case CompressionNone:
			payload = m.Value
		case CompressionGZIP:
			var buf bytes.Buffer
			writer := gzip.NewWriter(&buf)
			if _, err = writer.Write(m.Value); err != nil {
				return err
			}
			if err = writer.Close(); err != nil {
				return err
			}
			m.compressedCache = buf.Bytes()
			payload = m.compressedCache
		case CompressionSnappy:
			tmp := snappy.Encode(m.Value)
			m.compressedCache = tmp
			payload = m.compressedCache
		case CompressionLZ4:
			var buf bytes.Buffer
			writer := lz4.NewWriter(&buf)
			if _, err = writer.Write(m.Value); err != nil {
				return err
			}
			if err = writer.Close(); err != nil {
				return err
			}
			m.compressedCache = buf.Bytes()
			payload = m.compressedCache

		default:
			return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)}
		}
		// Keep in mind the compressed payload size for metric gathering
		m.compressedSize = len(payload)
	}

	if err = pe.putBytes(payload); err != nil {
		return err
	}

	return pe.pop()
}

func (m *Message) decode(pd packetDecoder) (err error) {
	err = pd.push(&crc32Field{})
	if err != nil {
		return err
	}

	m.Version, err = pd.getInt8()
	if err != nil {
		return err
	}

	attribute, err := pd.getInt8()
	if err != nil {
		return err
	}
	m.Codec = CompressionCodec(attribute & compressionCodecMask)

	if m.Version >= 1 {
		millis, err := pd.getInt64()
		if err != nil {
			return err
		}
		m.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
	}

	m.Key, err = pd.getBytes()
	if err != nil {
		return err
	}

	m.Value, err = pd.getBytes()
	if err != nil {
		return err
	}

	// Required for deep equal assertion during tests but might be useful
	// for future metrics about the compression ratio in fetch requests
	m.compressedSize = len(m.Value)

	switch m.Codec {
	case CompressionNone:
		// nothing to do
	case CompressionGZIP:
		if m.Value == nil {
			break
		}
		reader, err := gzip.NewReader(bytes.NewReader(m.Value))
		if err != nil {
			return err
		}
		if m.Value, err = ioutil.ReadAll(reader); err != nil {
			return err
		}
		if err := m.decodeSet(); err != nil {
			return err
		}
	case CompressionSnappy:
		if m.Value == nil {
			break
		}
		if m.Value, err = snappy.Decode(m.Value); err != nil {
			return err
		}
		if err := m.decodeSet(); err != nil {
			return err
		}
	case CompressionLZ4:
		if m.Value == nil {
			break
		}
		reader := lz4.NewReader(bytes.NewReader(m.Value))
		if m.Value, err = ioutil.ReadAll(reader); err != nil {
			return err
		}
		if err := m.decodeSet(); err != nil {
			return err
		}

	default:
		return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)}
	}

	return pd.pop()
}

// decodes a message set from a previously encoded bulk-message
func (m *Message) decodeSet() (err error) {
	pd := realDecoder{raw: m.Value}
	m.Set = &MessageSet{}
	return m.Set.decode(&pd)
}
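Only the low two attribute bits carry the codec, which is why both encode and decode apply the 0x03 mask; higher attribute bits are reserved for other flags (for example the timestamp type in newer formats). A tiny sketch of the masking:

package main

import "fmt"

func main() {
	const compressionCodecMask int8 = 0x03

	// An attributes byte with a reserved high bit set alongside codec 2:
	// decode masks the extra bit off and recovers CompressionSnappy.
	attributes := int8(0x0a)                       // binary 1010
	fmt.Println(attributes & compressionCodecMask) // 2 == CompressionSnappy
}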
89  vendor/github.com/Shopify/sarama/message_set.go  generated  vendored  Normal file
@@ -0,0 +1,89 @@
package sarama

type MessageBlock struct {
	Offset int64
	Msg    *Message
}

// Messages is a convenience helper which returns all of the messages wrapped
// in this block: the nested set's messages if the block wraps one, otherwise
// the block itself.
func (msb *MessageBlock) Messages() []*MessageBlock {
	if msb.Msg.Set != nil {
		return msb.Msg.Set.Messages
	}
	return []*MessageBlock{msb}
}

func (msb *MessageBlock) encode(pe packetEncoder) error {
	pe.putInt64(msb.Offset)
	pe.push(&lengthField{})
	err := msb.Msg.encode(pe)
	if err != nil {
		return err
	}
	return pe.pop()
}

func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
	if msb.Offset, err = pd.getInt64(); err != nil {
		return err
	}

	if err = pd.push(&lengthField{}); err != nil {
		return err
	}

	msb.Msg = new(Message)
	if err = msb.Msg.decode(pd); err != nil {
		return err
	}

	if err = pd.pop(); err != nil {
		return err
	}

	return nil
}

type MessageSet struct {
	PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
	Messages               []*MessageBlock
}

func (ms *MessageSet) encode(pe packetEncoder) error {
	for i := range ms.Messages {
		err := ms.Messages[i].encode(pe)
		if err != nil {
			return err
		}
	}
	return nil
}

func (ms *MessageSet) decode(pd packetDecoder) (err error) {
	ms.Messages = nil

	for pd.remaining() > 0 {
		msb := new(MessageBlock)
		err = msb.decode(pd)
		switch err {
		case nil:
			ms.Messages = append(ms.Messages, msb)
		case ErrInsufficientData:
			// As an optimization the server is allowed to return a partial message at the
			// end of the message set. Clients should handle this case. So we just ignore such things.
			ms.PartialTrailingMessage = true
			return nil
		default:
			return err
		}
	}

	return nil
}

func (ms *MessageSet) addMessage(msg *Message) {
	block := new(MessageBlock)
	block.Msg = msg
	ms.Messages = append(ms.Messages, block)
}
52 vendor/github.com/Shopify/sarama/metadata_request.go generated vendored Normal file
@@ -0,0 +1,52 @@
package sarama

type MetadataRequest struct {
	Topics []string
}

func (r *MetadataRequest) encode(pe packetEncoder) error {
	err := pe.putArrayLength(len(r.Topics))
	if err != nil {
		return err
	}

	for i := range r.Topics {
		err = pe.putString(r.Topics[i])
		if err != nil {
			return err
		}
	}
	return nil
}

func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
	topicCount, err := pd.getArrayLength()
	if err != nil {
		return err
	}
	if topicCount == 0 {
		return nil
	}

	r.Topics = make([]string, topicCount)
	for i := range r.Topics {
		topic, err := pd.getString()
		if err != nil {
			return err
		}
		r.Topics[i] = topic
	}
	return nil
}

func (r *MetadataRequest) key() int16 {
	return 3
}

func (r *MetadataRequest) version() int16 {
	return 0
}

func (r *MetadataRequest) requiredVersion() KafkaVersion {
	return minVersion
}
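With API key 3 and version 0 this maps onto the classic metadata call; an empty Topics slice asks the broker for every topic. A hedged usage sketch against a live broker follows; the address and topic name are placeholders.

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	broker := sarama.NewBroker("localhost:9092") // placeholder address
	if err := broker.Open(sarama.NewConfig()); err != nil {
		log.Fatal(err)
	}
	defer func() { _ = broker.Close() }()

	// Ask for one topic; leave Topics empty to fetch metadata for all.
	res, err := broker.GetMetadata(&sarama.MetadataRequest{Topics: []string{"my-topic"}})
	if err != nil {
		log.Fatal(err)
	}
	for _, tm := range res.Topics {
		log.Printf("topic %q: %d partitions", tm.Name, len(tm.Partitions))
	}
}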
239 vendor/github.com/Shopify/sarama/metadata_response.go generated vendored Normal file
@@ -0,0 +1,239 @@
package sarama

type PartitionMetadata struct {
	Err      KError
	ID       int32
	Leader   int32
	Replicas []int32
	Isr      []int32
}

func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) {
	tmp, err := pd.getInt16()
	if err != nil {
		return err
	}
	pm.Err = KError(tmp)

	pm.ID, err = pd.getInt32()
	if err != nil {
		return err
	}

	pm.Leader, err = pd.getInt32()
	if err != nil {
		return err
	}

	pm.Replicas, err = pd.getInt32Array()
	if err != nil {
		return err
	}

	pm.Isr, err = pd.getInt32Array()
	if err != nil {
		return err
	}

	return nil
}

func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) {
	pe.putInt16(int16(pm.Err))
	pe.putInt32(pm.ID)
	pe.putInt32(pm.Leader)

	err = pe.putInt32Array(pm.Replicas)
	if err != nil {
		return err
	}

	err = pe.putInt32Array(pm.Isr)
	if err != nil {
		return err
	}

	return nil
}

type TopicMetadata struct {
	Err        KError
	Name       string
	Partitions []*PartitionMetadata
}

func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
	tmp, err := pd.getInt16()
	if err != nil {
		return err
	}
	tm.Err = KError(tmp)

	tm.Name, err = pd.getString()
	if err != nil {
		return err
	}

	n, err := pd.getArrayLength()
	if err != nil {
		return err
	}
	tm.Partitions = make([]*PartitionMetadata, n)
	for i := 0; i < n; i++ {
		tm.Partitions[i] = new(PartitionMetadata)
		err = tm.Partitions[i].decode(pd)
		if err != nil {
			return err
		}
	}

	return nil
}

func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
	pe.putInt16(int16(tm.Err))

	err = pe.putString(tm.Name)
	if err != nil {
		return err
	}

	err = pe.putArrayLength(len(tm.Partitions))
	if err != nil {
		return err
	}

	for _, pm := range tm.Partitions {
		err = pm.encode(pe)
		if err != nil {
			return err
		}
	}

	return nil
}

type MetadataResponse struct {
	Brokers []*Broker
	Topics  []*TopicMetadata
}

func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
	n, err := pd.getArrayLength()
	if err != nil {
		return err
	}

	r.Brokers = make([]*Broker, n)
	for i := 0; i < n; i++ {
		r.Brokers[i] = new(Broker)
		err = r.Brokers[i].decode(pd)
		if err != nil {
			return err
		}
	}

	n, err = pd.getArrayLength()
	if err != nil {
		return err
	}

	r.Topics = make([]*TopicMetadata, n)
	for i := 0; i < n; i++ {
		r.Topics[i] = new(TopicMetadata)
		err = r.Topics[i].decode(pd)
		if err != nil {
			return err
		}
	}

	return nil
}

func (r *MetadataResponse) encode(pe packetEncoder) error {
	err := pe.putArrayLength(len(r.Brokers))
	if err != nil {
		return err
	}
	for _, broker := range r.Brokers {
		err = broker.encode(pe)
		if err != nil {
			return err
		}
	}

	err = pe.putArrayLength(len(r.Topics))
	if err != nil {
		return err
	}
	for _, tm := range r.Topics {
		err = tm.encode(pe)
		if err != nil {
			return err
		}
	}

	return nil
}

func (r *MetadataResponse) key() int16 {
	return 3
}

func (r *MetadataResponse) version() int16 {
	return 0
}

func (r *MetadataResponse) requiredVersion() KafkaVersion {
	return minVersion
}

// testing API

func (r *MetadataResponse) AddBroker(addr string, id int32) {
	r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
}

func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
	var tmatch *TopicMetadata

	for _, tm := range r.Topics {
		if tm.Name == topic {
			tmatch = tm
			goto foundTopic
		}
	}

	tmatch = new(TopicMetadata)
	tmatch.Name = topic
	r.Topics = append(r.Topics, tmatch)

foundTopic:

	tmatch.Err = err
	return tmatch
}

func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) {
	tmatch := r.AddTopic(topic, ErrNoError)
	var pmatch *PartitionMetadata

	for _, pm := range tmatch.Partitions {
		if pm.ID == partition {
			pmatch = pm
			goto foundPartition
		}
	}

	pmatch = new(PartitionMetadata)
	pmatch.ID = partition
	tmatch.Partitions = append(tmatch.Partitions, pmatch)

foundPartition:

	pmatch.Leader = brokerID
	pmatch.Replicas = replicas
	pmatch.Isr = isr
	pmatch.Err = err
}
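The AddBroker/AddTopic/AddTopicPartition helpers exist so tests can build fixture responses idempotently (AddTopic re-uses an existing entry via the goto). A hedged test fragment follows; it leans on the MockBroker type that appears later in this diff, and the topic name is made up.

package example_test

import (
	"testing"

	"github.com/Shopify/sarama"
)

func TestMetadataFixture(t *testing.T) {
	mock := sarama.NewMockBroker(t, 1)
	defer mock.Close()

	meta := new(sarama.MetadataResponse)
	meta.AddBroker(mock.Addr(), mock.BrokerID())
	// nil replicas/isr keep the fixture minimal; ErrNoError marks success.
	meta.AddTopicPartition("my-topic", 0, mock.BrokerID(), nil, nil, sarama.ErrNoError)
	mock.Returns(meta)
	// Any client pointed at mock.Addr() now sees one topic with one partition.
}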
51 vendor/github.com/Shopify/sarama/metrics.go generated vendored Normal file
@@ -0,0 +1,51 @@
package sarama

import (
	"fmt"
	"strings"

	"github.com/rcrowley/go-metrics"
)

// Use an exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
const (
	metricsReservoirSize = 1028
	metricsAlphaFactor   = 0.015
)

func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
	return r.GetOrRegister(name, func() metrics.Histogram {
		return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
	}).(metrics.Histogram)
}

func getMetricNameForBroker(name string, broker *Broker) string {
	// Use the broker ID like the Java client does, as it does not contain '.' or ':'
	// characters that can be interpreted as special characters by monitoring tools
	// (e.g. Graphite).
	return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
}

func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter {
	return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r)
}

func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram {
	return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r)
}

func getMetricNameForTopic(name string, topic string) string {
	// Convert dots to underscores, since reporters like Graphite typically use dots
	// to represent hierarchy; cf. KAFKA-1902 and KAFKA-2337.
	return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
}

func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
	return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
}

func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
	return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
}
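The two name builders differ only in which unsafe characters they avoid. A quick sketch of the resulting names, written inside package sarama since the helpers are unexported; the metric and topic names are illustrative.

package sarama

import "fmt"

// printMetricNames is a hypothetical helper demonstrating the naming scheme.
func printMetricNames(b *Broker) {
	// Broker names key on the numeric ID, e.g. "request-rate-for-broker-1".
	fmt.Println(getMetricNameForBroker("request-rate", b))
	// Topic names swap '.' for '_', e.g. "record-send-rate-for-topic-orders_v1".
	fmt.Println(getMetricNameForTopic("record-send-rate", "orders.v1"))
}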
324 vendor/github.com/Shopify/sarama/mockbroker.go generated vendored Normal file
@@ -0,0 +1,324 @@
package sarama

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"net"
	"reflect"
	"strconv"
	"sync"
	"time"

	"github.com/davecgh/go-spew/spew"
)

const (
	expectationTimeout = 500 * time.Millisecond
)

type requestHandlerFunc func(req *request) (res encoder)

// RequestNotifierFunc is invoked when a mock broker processes a request successfully
// and provides the number of bytes read and written.
type RequestNotifierFunc func(bytesRead, bytesWritten int)

// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
// to facilitate testing of higher level or specialized consumers and producers
// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
// but rather provides a facility to do that. It takes care of the TCP
// transport, request unmarshaling, and response marshaling, and makes it the
// test writer's responsibility to program MockBroker behaviour that is correct
// according to the Kafka API protocol.
//
// MockBroker is implemented as a TCP server listening on a kernel-selected
// localhost port that can accept many connections. It reads Kafka requests
// from that connection and returns responses programmed by the SetHandlerByMap
// function. If a MockBroker receives a request that it has no programmed
// response for, then it returns nothing and the request times out.
//
// A set of MockRequest builders to define mappings used by MockBroker is
// provided by Sarama. But users can develop MockRequests of their own and use
// them along with or instead of the standard ones.
//
// When running tests with MockBroker it is strongly recommended to specify
// a timeout to `go test` so that if the broker hangs waiting for a response,
// the test panics.
//
// It is not necessary to prefix message length or correlation ID to your
// response bytes; the server does that automatically as a convenience.
type MockBroker struct {
	brokerID     int32
	port         int32
	closing      chan none
	stopper      chan none
	expectations chan encoder
	listener     net.Listener
	t            TestReporter
	latency      time.Duration
	handler      requestHandlerFunc
	notifier     RequestNotifierFunc
	history      []RequestResponse
	lock         sync.Mutex
}

// RequestResponse represents a Request/Response pair processed by MockBroker.
type RequestResponse struct {
	Request  protocolBody
	Response encoder
}

// SetLatency makes the broker pause for the specified period every time
// before replying.
func (b *MockBroker) SetLatency(latency time.Duration) {
	b.latency = latency
}

// SetHandlerByMap defines a mapping of Request types to MockResponses. When a
// request is received by the broker, it looks up the request type in the map
// and uses the found MockResponse instance to generate an appropriate reply.
// If the request type is not found in the map then nothing is sent.
func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
	b.setHandler(func(req *request) (res encoder) {
		reqTypeName := reflect.TypeOf(req.body).Elem().Name()
		mockResponse := handlerMap[reqTypeName]
		if mockResponse == nil {
			return nil
		}
		return mockResponse.For(req.body)
	})
}

// SetNotifier sets a function that will get invoked whenever a request has been
// processed successfully and will provide the number of bytes read and written.
func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
	b.lock.Lock()
	b.notifier = notifier
	b.lock.Unlock()
}

// BrokerID returns the broker ID assigned to the broker.
func (b *MockBroker) BrokerID() int32 {
	return b.brokerID
}

// History returns a slice of RequestResponse pairs in the order they were
// processed by the broker. Note that in case of multiple connections to the
// broker the order expected by a test can be different from the order recorded
// in the history, unless some synchronization is implemented in the test.
func (b *MockBroker) History() []RequestResponse {
	b.lock.Lock()
	history := make([]RequestResponse, len(b.history))
	copy(history, b.history)
	b.lock.Unlock()
	return history
}

// Port returns the TCP port number the broker is listening for requests on.
func (b *MockBroker) Port() int32 {
	return b.port
}

// Addr returns the broker connection string in the form "<address>:<port>".
func (b *MockBroker) Addr() string {
	return b.listener.Addr().String()
}

// Close terminates the broker, blocking until it stops internal goroutines and
// releases all resources.
func (b *MockBroker) Close() {
	close(b.expectations)
	if len(b.expectations) > 0 {
		buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
		for e := range b.expectations {
			_, _ = buf.WriteString(spew.Sdump(e))
		}
		b.t.Error(buf.String())
	}
	close(b.closing)
	<-b.stopper
}

// setHandler sets the specified function as the request handler. Whenever
// a mock broker reads a request from the wire it passes the request to the
// function and sends back whatever the handler function returns.
func (b *MockBroker) setHandler(handler requestHandlerFunc) {
	b.lock.Lock()
	b.handler = handler
	b.lock.Unlock()
}

func (b *MockBroker) serverLoop() {
	defer close(b.stopper)
	var err error
	var conn net.Conn

	go func() {
		<-b.closing
		err := b.listener.Close()
		if err != nil {
			b.t.Error(err)
		}
	}()

	wg := &sync.WaitGroup{}
	i := 0
	for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
		wg.Add(1)
		go b.handleRequests(conn, i, wg)
		i++
	}
	wg.Wait()
	Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
}

func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
	defer wg.Done()
	defer func() {
		_ = conn.Close()
	}()
	Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
	var err error

	abort := make(chan none)
	defer close(abort)
	go func() {
		select {
		case <-b.closing:
			_ = conn.Close()
		case <-abort:
		}
	}()

	resHeader := make([]byte, 8)
	for {
		req, bytesRead, err := decodeRequest(conn)
		if err != nil {
			Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
			b.serverError(err)
			break
		}

		if b.latency > 0 {
			time.Sleep(b.latency)
		}

		b.lock.Lock()
		res := b.handler(req)
		b.history = append(b.history, RequestResponse{req.body, res})
		b.lock.Unlock()

		if res == nil {
			Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
			continue
		}
		Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)

		encodedRes, err := encode(res, nil)
		if err != nil {
			b.serverError(err)
			break
		}
		if len(encodedRes) == 0 {
			b.lock.Lock()
			if b.notifier != nil {
				b.notifier(bytesRead, 0)
			}
			b.lock.Unlock()
			continue
		}

		binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
		binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
		if _, err = conn.Write(resHeader); err != nil {
			b.serverError(err)
			break
		}
		if _, err = conn.Write(encodedRes); err != nil {
			b.serverError(err)
			break
		}

		b.lock.Lock()
		if b.notifier != nil {
			b.notifier(bytesRead, len(resHeader)+len(encodedRes))
		}
		b.lock.Unlock()
	}
	Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
}

func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
	select {
	case res, ok := <-b.expectations:
		if !ok {
			return nil
		}
		return res
	case <-time.After(expectationTimeout):
		return nil
	}
}

func (b *MockBroker) serverError(err error) {
	isConnectionClosedError := false
	if _, ok := err.(*net.OpError); ok {
		isConnectionClosedError = true
	} else if err == io.EOF {
		isConnectionClosedError = true
	} else if err.Error() == "use of closed network connection" {
		isConnectionClosedError = true
	}

	if isConnectionClosedError {
		return
	}

	b.t.Errorf(err.Error())
}

// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
// test framework and a broker ID. If an error occurs it is simply logged to the
// TestReporter and the broker exits.
func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
	return NewMockBrokerAddr(t, brokerID, "localhost:0")
}

// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
// it rather than just some ephemeral port.
func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
	var err error

	broker := &MockBroker{
		closing:      make(chan none),
		stopper:      make(chan none),
		t:            t,
		brokerID:     brokerID,
		expectations: make(chan encoder, 512),
	}
	broker.handler = broker.defaultRequestHandler

	broker.listener, err = net.Listen("tcp", addr)
	if err != nil {
		t.Fatal(err)
	}
	Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
	_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	tmp, err := strconv.ParseInt(portStr, 10, 32)
	if err != nil {
		t.Fatal(err)
	}
	broker.port = int32(tmp)

	go broker.serverLoop()

	return broker
}

func (b *MockBroker) Returns(e encoder) {
	b.expectations <- e
}
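Putting MockBroker together with the response builders from the next file gives a complete in-process fixture. A hedged sketch of a typical test setup follows; the topic and partition values are invented.

package example_test

import (
	"testing"

	"github.com/Shopify/sarama"
)

func TestWithMockBroker(t *testing.T) {
	mock := sarama.NewMockBroker(t, 1)
	defer mock.Close()

	// Route each request type to a canned builder; unknown types time out.
	mock.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(mock.Addr(), mock.BrokerID()).
			SetLeader("my-topic", 0, mock.BrokerID()),
	})

	client, err := sarama.NewClient([]string{mock.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()
}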
455 vendor/github.com/Shopify/sarama/mockresponses.go generated vendored Normal file
@@ -0,0 +1,455 @@
package sarama

import (
	"fmt"
)

// TestReporter has methods matching go's testing.T to avoid importing
// `testing` in the main part of the library.
type TestReporter interface {
	Error(...interface{})
	Errorf(string, ...interface{})
	Fatal(...interface{})
	Fatalf(string, ...interface{})
}

// MockResponse is a response builder interface; it defines one method that
// allows generating a response based on a request body. MockResponses are used
// to program the behavior of MockBroker in tests.
type MockResponse interface {
	For(reqBody versionedDecoder) (res encoder)
}

// MockWrapper is a mock response builder that returns a particular concrete
// response regardless of the actual request passed to the `For` method.
type MockWrapper struct {
	res encoder
}

func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
	return mw.res
}

func NewMockWrapper(res encoder) *MockWrapper {
	return &MockWrapper{res: res}
}

// MockSequence is a mock response builder that is created from a sequence of
// concrete responses. Every time a `MockBroker` calls its `For` method
// the next response from the sequence is returned. When the end of the
// sequence is reached the last element from the sequence is returned.
type MockSequence struct {
	responses []MockResponse
}

func NewMockSequence(responses ...interface{}) *MockSequence {
	ms := &MockSequence{}
	ms.responses = make([]MockResponse, len(responses))
	for i, res := range responses {
		switch res := res.(type) {
		case MockResponse:
			ms.responses[i] = res
		case encoder:
			ms.responses[i] = NewMockWrapper(res)
		default:
			panic(fmt.Sprintf("Unexpected response type: %T", res))
		}
	}
	return ms
}

func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
	res = mc.responses[0].For(reqBody)
	if len(mc.responses) > 1 {
		mc.responses = mc.responses[1:]
	}
	return res
}

// MockMetadataResponse is a `MetadataResponse` builder.
type MockMetadataResponse struct {
	leaders map[string]map[int32]int32
	brokers map[string]int32
	t       TestReporter
}

func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
	return &MockMetadataResponse{
		leaders: make(map[string]map[int32]int32),
		brokers: make(map[string]int32),
		t:       t,
	}
}

func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
	partitions := mmr.leaders[topic]
	if partitions == nil {
		partitions = make(map[int32]int32)
		mmr.leaders[topic] = partitions
	}
	partitions[partition] = brokerID
	return mmr
}

func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
	mmr.brokers[addr] = brokerID
	return mmr
}

func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
	metadataRequest := reqBody.(*MetadataRequest)
	metadataResponse := &MetadataResponse{}
	for addr, brokerID := range mmr.brokers {
		metadataResponse.AddBroker(addr, brokerID)
	}
	if len(metadataRequest.Topics) == 0 {
		for topic, partitions := range mmr.leaders {
			for partition, brokerID := range partitions {
				metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
			}
		}
		return metadataResponse
	}
	for _, topic := range metadataRequest.Topics {
		for partition, brokerID := range mmr.leaders[topic] {
			metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
		}
	}
	return metadataResponse
}

// MockOffsetResponse is an `OffsetResponse` builder.
type MockOffsetResponse struct {
	offsets map[string]map[int32]map[int64]int64
	t       TestReporter
}

func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
	return &MockOffsetResponse{
		offsets: make(map[string]map[int32]map[int64]int64),
		t:       t,
	}
}

func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
	partitions := mor.offsets[topic]
	if partitions == nil {
		partitions = make(map[int32]map[int64]int64)
		mor.offsets[topic] = partitions
	}
	times := partitions[partition]
	if times == nil {
		times = make(map[int64]int64)
		partitions[partition] = times
	}
	times[time] = offset
	return mor
}

func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
	offsetRequest := reqBody.(*OffsetRequest)
	offsetResponse := &OffsetResponse{}
	for topic, partitions := range offsetRequest.blocks {
		for partition, block := range partitions {
			offset := mor.getOffset(topic, partition, block.time)
			offsetResponse.AddTopicPartition(topic, partition, offset)
		}
	}
	return offsetResponse
}

func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
	partitions := mor.offsets[topic]
	if partitions == nil {
		mor.t.Errorf("missing topic: %s", topic)
	}
	times := partitions[partition]
	if times == nil {
		mor.t.Errorf("missing partition: %d", partition)
	}
	offset, ok := times[time]
	if !ok {
		mor.t.Errorf("missing time: %d", time)
	}
	return offset
}

// MockFetchResponse is a `FetchResponse` builder.
type MockFetchResponse struct {
	messages       map[string]map[int32]map[int64]Encoder
	highWaterMarks map[string]map[int32]int64
	t              TestReporter
	batchSize      int
}

func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
	return &MockFetchResponse{
		messages:       make(map[string]map[int32]map[int64]Encoder),
		highWaterMarks: make(map[string]map[int32]int64),
		t:              t,
		batchSize:      batchSize,
	}
}

func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
	partitions := mfr.messages[topic]
	if partitions == nil {
		partitions = make(map[int32]map[int64]Encoder)
		mfr.messages[topic] = partitions
	}
	messages := partitions[partition]
	if messages == nil {
		messages = make(map[int64]Encoder)
		partitions[partition] = messages
	}
	messages[offset] = msg
	return mfr
}

func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
	partitions := mfr.highWaterMarks[topic]
	if partitions == nil {
		partitions = make(map[int32]int64)
		mfr.highWaterMarks[topic] = partitions
	}
	partitions[partition] = offset
	return mfr
}

func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
	fetchRequest := reqBody.(*FetchRequest)
	res := &FetchResponse{}
	for topic, partitions := range fetchRequest.blocks {
		for partition, block := range partitions {
			initialOffset := block.fetchOffset
			offset := initialOffset
			maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
			for i := 0; i < mfr.batchSize && offset < maxOffset; {
				msg := mfr.getMessage(topic, partition, offset)
				if msg != nil {
					res.AddMessage(topic, partition, nil, msg, offset)
					i++
				}
				offset++
			}
			fb := res.GetBlock(topic, partition)
			if fb == nil {
				res.AddError(topic, partition, ErrNoError)
				fb = res.GetBlock(topic, partition)
			}
			fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
		}
	}
	return res
}

func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder {
	partitions := mfr.messages[topic]
	if partitions == nil {
		return nil
	}
	messages := partitions[partition]
	if messages == nil {
		return nil
	}
	return messages[offset]
}

func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
	partitions := mfr.messages[topic]
	if partitions == nil {
		return 0
	}
	messages := partitions[partition]
	if messages == nil {
		return 0
	}
	return len(messages)
}

func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
	partitions := mfr.highWaterMarks[topic]
	if partitions == nil {
		return 0
	}
	return partitions[partition]
}

// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
type MockConsumerMetadataResponse struct {
	coordinators map[string]interface{}
	t            TestReporter
}

func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
	return &MockConsumerMetadataResponse{
		coordinators: make(map[string]interface{}),
		t:            t,
	}
}

func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
	mr.coordinators[group] = broker
	return mr
}

func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
	mr.coordinators[group] = kerror
	return mr
}

func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
	req := reqBody.(*ConsumerMetadataRequest)
	group := req.ConsumerGroup
	res := &ConsumerMetadataResponse{}
	v := mr.coordinators[group]
	switch v := v.(type) {
	case *MockBroker:
		res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
	case KError:
		res.Err = v
	}
	return res
}

// MockOffsetCommitResponse is an `OffsetCommitResponse` builder.
type MockOffsetCommitResponse struct {
	errors map[string]map[string]map[int32]KError
	t      TestReporter
}

func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
	return &MockOffsetCommitResponse{t: t}
}

func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
	if mr.errors == nil {
		mr.errors = make(map[string]map[string]map[int32]KError)
	}
	topics := mr.errors[group]
	if topics == nil {
		topics = make(map[string]map[int32]KError)
		mr.errors[group] = topics
	}
	partitions := topics[topic]
	if partitions == nil {
		partitions = make(map[int32]KError)
		topics[topic] = partitions
	}
	partitions[partition] = kerror
	return mr
}

func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder {
	req := reqBody.(*OffsetCommitRequest)
	group := req.ConsumerGroup
	res := &OffsetCommitResponse{}
	for topic, partitions := range req.blocks {
		for partition := range partitions {
			res.AddError(topic, partition, mr.getError(group, topic, partition))
		}
	}
	return res
}

func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
	topics := mr.errors[group]
	if topics == nil {
		return ErrNoError
	}
	partitions := topics[topic]
	if partitions == nil {
		return ErrNoError
	}
	kerror, ok := partitions[partition]
	if !ok {
		return ErrNoError
	}
	return kerror
}

// MockProduceResponse is a `ProduceResponse` builder.
type MockProduceResponse struct {
	errors map[string]map[int32]KError
	t      TestReporter
}

func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
	return &MockProduceResponse{t: t}
}

func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
	if mr.errors == nil {
		mr.errors = make(map[string]map[int32]KError)
	}
	partitions := mr.errors[topic]
	if partitions == nil {
		partitions = make(map[int32]KError)
		mr.errors[topic] = partitions
	}
	partitions[partition] = kerror
	return mr
}

func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
	req := reqBody.(*ProduceRequest)
	res := &ProduceResponse{}
	for topic, partitions := range req.msgSets {
		for partition := range partitions {
			res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
		}
	}
	return res
}

func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
	partitions := mr.errors[topic]
	if partitions == nil {
		return ErrNoError
	}
	kerror, ok := partitions[partition]
	if !ok {
		return ErrNoError
	}
	return kerror
}

// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
type MockOffsetFetchResponse struct {
	offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
	t       TestReporter
}

func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
	return &MockOffsetFetchResponse{t: t}
}

func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
	if mr.offsets == nil {
		mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
	}
	topics := mr.offsets[group]
	if topics == nil {
		topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
		mr.offsets[group] = topics
	}
	partitions := topics[topic]
	if partitions == nil {
		partitions = make(map[int32]*OffsetFetchResponseBlock)
		topics[topic] = partitions
	}
	partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror}
	return mr
}

func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
	req := reqBody.(*OffsetFetchRequest)
	group := req.ConsumerGroup
	res := &OffsetFetchResponse{}
	for topic, partitions := range mr.offsets[group] {
		for partition, block := range partitions {
			res.AddBlock(topic, partition, block)
		}
	}
	return res
}
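MockSequence is what lets a test script multi-step behaviour: early polls can come back empty, later polls can serve data, and once exhausted the sequence replays its last element forever. A hedged fragment follows; StringEncoder is sarama's stock Encoder, and the topic is illustrative.

package example_test

import (
	"testing"

	"github.com/Shopify/sarama"
)

func TestFetchSequence(t *testing.T) {
	mock := sarama.NewMockBroker(t, 1)
	defer mock.Close()

	mock.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(mock.Addr(), mock.BrokerID()).
			SetLeader("my-topic", 0, mock.BrokerID()),
		// First fetch comes back empty; later fetches serve one message.
		// Once exhausted, a MockSequence keeps replaying its last entry.
		"FetchRequest": sarama.NewMockSequence(
			sarama.NewMockFetchResponse(t, 1),
			sarama.NewMockFetchResponse(t, 1).
				SetMessage("my-topic", 0, 0, sarama.StringEncoder("hello")),
		),
	})
}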
190 vendor/github.com/Shopify/sarama/offset_commit_request.go generated vendored Normal file
@@ -0,0 +1,190 @@
package sarama

// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
// tells the broker to set the timestamp to the time at which the request was received.
// The timestamp is only used if message version 1 is used, which requires Kafka 0.8.2.
const ReceiveTime int64 = -1

// GroupGenerationUndefined is a special value for the group generation field of
// Offset Commit Requests that should be used when a consumer group does not rely
// on Kafka for partition management.
const GroupGenerationUndefined = -1

type offsetCommitRequestBlock struct {
	offset    int64
	timestamp int64
	metadata  string
}

func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
	pe.putInt64(b.offset)
	if version == 1 {
		pe.putInt64(b.timestamp)
	} else if b.timestamp != 0 {
		Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
	}

	return pe.putString(b.metadata)
}

func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
	if b.offset, err = pd.getInt64(); err != nil {
		return err
	}
	if version == 1 {
		if b.timestamp, err = pd.getInt64(); err != nil {
			return err
		}
	}
	b.metadata, err = pd.getString()
	return err
}

type OffsetCommitRequest struct {
	ConsumerGroup           string
	ConsumerGroupGeneration int32  // v1 or later
	ConsumerID              string // v1 or later
	RetentionTime           int64  // v2 or later

	// Version can be:
	// - 0 (kafka 0.8.1 and later)
	// - 1 (kafka 0.8.2 and later)
	// - 2 (kafka 0.9.0 and later)
	Version int16
	blocks  map[string]map[int32]*offsetCommitRequestBlock
}

func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
	if r.Version < 0 || r.Version > 2 {
		return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
	}

	if err := pe.putString(r.ConsumerGroup); err != nil {
		return err
	}

	if r.Version >= 1 {
		pe.putInt32(r.ConsumerGroupGeneration)
		if err := pe.putString(r.ConsumerID); err != nil {
			return err
		}
	} else {
		if r.ConsumerGroupGeneration != 0 {
			Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
		}
		if r.ConsumerID != "" {
			Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
		}
	}

	if r.Version >= 2 {
		pe.putInt64(r.RetentionTime)
	} else if r.RetentionTime != 0 {
		Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
	}

	if err := pe.putArrayLength(len(r.blocks)); err != nil {
		return err
	}
	for topic, partitions := range r.blocks {
		if err := pe.putString(topic); err != nil {
			return err
		}
		if err := pe.putArrayLength(len(partitions)); err != nil {
			return err
		}
		for partition, block := range partitions {
			pe.putInt32(partition)
			if err := block.encode(pe, r.Version); err != nil {
				return err
			}
		}
	}
	return nil
}

func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
	r.Version = version

	if r.ConsumerGroup, err = pd.getString(); err != nil {
		return err
	}

	if r.Version >= 1 {
		if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
			return err
		}
		if r.ConsumerID, err = pd.getString(); err != nil {
			return err
		}
	}

	if r.Version >= 2 {
		if r.RetentionTime, err = pd.getInt64(); err != nil {
			return err
		}
	}

	topicCount, err := pd.getArrayLength()
	if err != nil {
		return err
	}
	if topicCount == 0 {
		return nil
	}
	r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
	for i := 0; i < topicCount; i++ {
		topic, err := pd.getString()
		if err != nil {
			return err
		}
		partitionCount, err := pd.getArrayLength()
		if err != nil {
			return err
		}
		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
		for j := 0; j < partitionCount; j++ {
			partition, err := pd.getInt32()
			if err != nil {
				return err
			}
			block := &offsetCommitRequestBlock{}
			if err := block.decode(pd, r.Version); err != nil {
				return err
			}
			r.blocks[topic][partition] = block
		}
	}
	return nil
}

func (r *OffsetCommitRequest) key() int16 {
	return 8
}

func (r *OffsetCommitRequest) version() int16 {
	return r.Version
}

func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
	switch r.Version {
	case 1:
		return V0_8_2_0
	case 2:
		return V0_9_0_0
	default:
		return minVersion
	}
}

func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
	if r.blocks == nil {
		r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
	}

	if r.blocks[topic] == nil {
		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
	}

	r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
}
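AddBlock plus the version gating above is all a caller needs; for a v1 request, passing ReceiveTime as the timestamp asks the broker to stamp the commit time itself. A hedged sketch follows; the group, topic, and offset are placeholders and the constructor name is hypothetical.

package example

import "github.com/Shopify/sarama"

// newCommitRequest is a hypothetical constructor for illustration.
func newCommitRequest() *sarama.OffsetCommitRequest {
	req := &sarama.OffsetCommitRequest{
		Version:       1, // v1 encodes the per-block timestamp
		ConsumerGroup: "my-group",
	}
	// ReceiveTime (-1) tells the broker to stamp the commit time itself.
	req.AddBlock("my-topic", 0, 42, sarama.ReceiveTime, "")
	return req
}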
85 vendor/github.com/Shopify/sarama/offset_commit_response.go generated vendored Normal file
@@ -0,0 +1,85 @@
package sarama

type OffsetCommitResponse struct {
	Errors map[string]map[int32]KError
}

func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
	if r.Errors == nil {
		r.Errors = make(map[string]map[int32]KError)
	}
	partitions := r.Errors[topic]
	if partitions == nil {
		partitions = make(map[int32]KError)
		r.Errors[topic] = partitions
	}
	partitions[partition] = kerror
}

func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
	if err := pe.putArrayLength(len(r.Errors)); err != nil {
		return err
	}
	for topic, partitions := range r.Errors {
		if err := pe.putString(topic); err != nil {
			return err
		}
		if err := pe.putArrayLength(len(partitions)); err != nil {
			return err
		}
		for partition, kerror := range partitions {
			pe.putInt32(partition)
			pe.putInt16(int16(kerror))
		}
	}
	return nil
}

func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
	numTopics, err := pd.getArrayLength()
	if err != nil || numTopics == 0 {
		return err
	}

	r.Errors = make(map[string]map[int32]KError, numTopics)
	for i := 0; i < numTopics; i++ {
		name, err := pd.getString()
		if err != nil {
			return err
		}

		numErrors, err := pd.getArrayLength()
		if err != nil {
			return err
		}

		r.Errors[name] = make(map[int32]KError, numErrors)

		for j := 0; j < numErrors; j++ {
			id, err := pd.getInt32()
			if err != nil {
				return err
			}

			tmp, err := pd.getInt16()
			if err != nil {
				return err
			}
			r.Errors[name][id] = KError(tmp)
		}
	}

	return nil
}

func (r *OffsetCommitResponse) key() int16 {
	return 8
}

func (r *OffsetCommitResponse) version() int16 {
	return 0
}

func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
	return minVersion
}
81 vendor/github.com/Shopify/sarama/offset_fetch_request.go generated vendored Normal file
@@ -0,0 +1,81 @@
package sarama

type OffsetFetchRequest struct {
	ConsumerGroup string
	Version       int16
	partitions    map[string][]int32
}

func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
	if r.Version < 0 || r.Version > 1 {
		return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
	}

	if err = pe.putString(r.ConsumerGroup); err != nil {
		return err
	}
	if err = pe.putArrayLength(len(r.partitions)); err != nil {
		return err
	}
	for topic, partitions := range r.partitions {
		if err = pe.putString(topic); err != nil {
			return err
		}
		if err = pe.putInt32Array(partitions); err != nil {
			return err
		}
	}
	return nil
}

func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
	r.Version = version
	if r.ConsumerGroup, err = pd.getString(); err != nil {
		return err
	}
	partitionCount, err := pd.getArrayLength()
	if err != nil {
		return err
	}
	if partitionCount == 0 {
		return nil
	}
	r.partitions = make(map[string][]int32)
	for i := 0; i < partitionCount; i++ {
		topic, err := pd.getString()
		if err != nil {
			return err
		}
		partitions, err := pd.getInt32Array()
		if err != nil {
			return err
		}
		r.partitions[topic] = partitions
	}
	return nil
}

func (r *OffsetFetchRequest) key() int16 {
	return 9
}

func (r *OffsetFetchRequest) version() int16 {
	return r.Version
}

func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
	switch r.Version {
	case 1:
		return V0_8_2_0
	default:
		return minVersion
	}
}

func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
	if r.partitions == nil {
		r.partitions = make(map[string][]int32)
	}

	r.partitions[topic] = append(r.partitions[topic], partitionID)
}
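Fetching committed offsets is symmetric: name the group, then list topic/partitions. A short hedged sketch; the identifiers are placeholders and the constructor name is hypothetical.

package example

import "github.com/Shopify/sarama"

// newFetchRequest builds a v1 (Kafka-backed offsets) fetch for one partition.
func newFetchRequest() *sarama.OffsetFetchRequest {
	req := &sarama.OffsetFetchRequest{ConsumerGroup: "my-group", Version: 1}
	req.AddPartition("my-topic", 0)
	return req
}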
143 vendor/github.com/Shopify/sarama/offset_fetch_response.go generated vendored Normal file
@@ -0,0 +1,143 @@
package sarama

type OffsetFetchResponseBlock struct {
	Offset   int64
	Metadata string
	Err      KError
}

func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
	b.Offset, err = pd.getInt64()
	if err != nil {
		return err
	}

	b.Metadata, err = pd.getString()
	if err != nil {
		return err
	}

	tmp, err := pd.getInt16()
	if err != nil {
		return err
	}
	b.Err = KError(tmp)

	return nil
}

func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
	pe.putInt64(b.Offset)

	err = pe.putString(b.Metadata)
	if err != nil {
		return err
	}

	pe.putInt16(int16(b.Err))

	return nil
}

type OffsetFetchResponse struct {
	Blocks map[string]map[int32]*OffsetFetchResponseBlock
}

func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
	if err := pe.putArrayLength(len(r.Blocks)); err != nil {
		return err
	}
	for topic, partitions := range r.Blocks {
		if err := pe.putString(topic); err != nil {
			return err
		}
		if err := pe.putArrayLength(len(partitions)); err != nil {
			return err
		}
		for partition, block := range partitions {
			pe.putInt32(partition)
			if err := block.encode(pe); err != nil {
				return err
			}
		}
	}
	return nil
}

func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
	numTopics, err := pd.getArrayLength()
	if err != nil || numTopics == 0 {
		return err
	}

	r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
	for i := 0; i < numTopics; i++ {
		name, err := pd.getString()
		if err != nil {
			return err
		}

		numBlocks, err := pd.getArrayLength()
		if err != nil {
			return err
		}

		if numBlocks == 0 {
			r.Blocks[name] = nil
			continue
		}
		r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)

		for j := 0; j < numBlocks; j++ {
			id, err := pd.getInt32()
			if err != nil {
				return err
			}

			block := new(OffsetFetchResponseBlock)
			err = block.decode(pd)
			if err != nil {
				return err
			}
			r.Blocks[name][id] = block
		}
	}

	return nil
}

func (r *OffsetFetchResponse) key() int16 {
	return 9
}

func (r *OffsetFetchResponse) version() int16 {
	return 0
}

func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
	return minVersion
}

func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
	if r.Blocks == nil {
		return nil
	}

	if r.Blocks[topic] == nil {
		return nil
	}

	return r.Blocks[topic][partition]
}

func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
	if r.Blocks == nil {
		r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
	}
	partitions := r.Blocks[topic]
	if partitions == nil {
		partitions = make(map[int32]*OffsetFetchResponseBlock)
		r.Blocks[topic] = partitions
	}
	partitions[partition] = block
}
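Reading the result back goes through GetBlock, which nil-checks both map levels. A hedged fragment; the helper name is hypothetical, and the -1 sentinel for "nothing committed yet" is standard Kafka behaviour rather than something this file defines.

package example

import "github.com/Shopify/sarama"

// committedOffset returns the committed offset for my-topic/0, or false
// when none is recorded.
func committedOffset(res *sarama.OffsetFetchResponse) (int64, bool) {
	block := res.GetBlock("my-topic", 0) // nil if topic/partition absent
	if block == nil || block.Err != sarama.ErrNoError {
		return 0, false
	}
	// Brokers report -1 when nothing has been committed yet.
	return block.Offset, block.Offset >= 0
}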
542 vendor/github.com/Shopify/sarama/offset_manager.go generated vendored Normal file
@@ -0,0 +1,542 @@
package sarama

import (
	"sync"
	"time"
)

// Offset Manager

// OffsetManager uses Kafka to store and fetch consumed partition offsets.
type OffsetManager interface {
	// ManagePartition creates a PartitionOffsetManager on the given topic/partition.
	// It will return an error if this OffsetManager is already managing the given
	// topic/partition.
	ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)

	// Close stops the OffsetManager from managing offsets. It is required to call
	// this function before an OffsetManager object passes out of scope, as it
	// will otherwise leak memory. You must call this after all the
	// PartitionOffsetManagers are closed.
	Close() error
}

type offsetManager struct {
	client Client
	conf   *Config
	group  string

	lock sync.Mutex
	poms map[string]map[int32]*partitionOffsetManager
	boms map[*Broker]*brokerOffsetManager
}

// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
// It is still necessary to call Close() on the underlying client when finished with the partition manager.
func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
	// Check that we are not dealing with a closed Client before processing any other arguments
	if client.Closed() {
		return nil, ErrClosedClient
	}

	om := &offsetManager{
		client: client,
		conf:   client.Config(),
		group:  group,
		poms:   make(map[string]map[int32]*partitionOffsetManager),
		boms:   make(map[*Broker]*brokerOffsetManager),
	}

	return om, nil
}

func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
	pom, err := om.newPartitionOffsetManager(topic, partition)
	if err != nil {
		return nil, err
	}

	om.lock.Lock()
	defer om.lock.Unlock()

	topicManagers := om.poms[topic]
	if topicManagers == nil {
		topicManagers = make(map[int32]*partitionOffsetManager)
		om.poms[topic] = topicManagers
	}

	if topicManagers[partition] != nil {
		return nil, ConfigurationError("That topic/partition is already being managed")
	}

	topicManagers[partition] = pom
	return pom, nil
}

func (om *offsetManager) Close() error {
	return nil
}

func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
	om.lock.Lock()
	defer om.lock.Unlock()

	bom := om.boms[broker]
	if bom == nil {
		bom = om.newBrokerOffsetManager(broker)
		om.boms[broker] = bom
	}

	bom.refs++

	return bom
}

func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) {
	om.lock.Lock()
	defer om.lock.Unlock()

	bom.refs--

	if bom.refs == 0 {
		close(bom.updateSubscriptions)
		if om.boms[bom.broker] == bom {
			delete(om.boms, bom.broker)
		}
	}
}

func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) {
	om.lock.Lock()
	defer om.lock.Unlock()

	delete(om.boms, bom.broker)
}

func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) {
	om.lock.Lock()
	defer om.lock.Unlock()

	delete(om.poms[pom.topic], pom.partition)
	if len(om.poms[pom.topic]) == 0 {
		delete(om.poms, pom.topic)
	}
}

// Partition Offset Manager

// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
// on a partition offset manager to avoid leaks; it will not be garbage-collected automatically when it passes
// out of scope.
type PartitionOffsetManager interface {
	// NextOffset returns the next offset that should be consumed for the managed
	// partition, accompanied by metadata which can be used to reconstruct the state
	// of the partition consumer when it resumes. NextOffset() will return
	// `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
	// was committed for this partition yet.
	NextOffset() (int64, string)

	// MarkOffset marks the provided offset, alongside a metadata string
	// that represents the state of the partition consumer at that point in time. The
	// metadata string can be used by another consumer to restore that state, so it
	// can resume consumption.
	//
	// To follow upstream conventions, you are expected to mark the offset of the
	// next message to read, not the last message read. Thus, when calling `MarkOffset`
	// you should typically add one to the offset of the last consumed message.
	//
	// Note: calling MarkOffset does not necessarily commit the offset to the backend
	// store immediately for efficiency reasons, and it may never be committed if
	// your application crashes. This means that you may end up processing the same
	// message twice, and your processing should ideally be idempotent.
	MarkOffset(offset int64, metadata string)

	// Errors returns a read channel of errors that occur during offset management, if
	// enabled. By default, errors are logged and not returned over this channel. If
	// you want to implement any custom error handling, set your config's
	// Consumer.Return.Errors setting to true, and read from this channel.
	Errors() <-chan *ConsumerError

	// AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
	// return immediately, after which you should wait until the 'errors' channel has
	// been drained and closed. It is required to call this function (or Close) before
	// a consumer object passes out of scope, as it will otherwise leak memory. You
	// must call this before calling Close on the underlying client.
	AsyncClose()

	// Close stops the PartitionOffsetManager from managing offsets. It is required to
	// call this function (or AsyncClose) before a PartitionOffsetManager object
	// passes out of scope, as it will otherwise leak memory. You must call this
	// before calling Close on the underlying client.
	Close() error
}

type partitionOffsetManager struct {
	parent    *offsetManager
	topic     string
	partition int32

	lock     sync.Mutex
	offset   int64
	metadata string
	dirty    bool
	clean    sync.Cond
	broker   *brokerOffsetManager

	errors    chan *ConsumerError
	rebalance chan none
	dying     chan none
}

func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
	pom := &partitionOffsetManager{
		parent:    om,
		topic:     topic,
		partition: partition,
		errors:    make(chan *ConsumerError, om.conf.ChannelBufferSize),
		rebalance: make(chan none, 1),
		dying:     make(chan none),
	}
	pom.clean.L = &pom.lock

	if err := pom.selectBroker(); err != nil {
		return nil, err
	}

	if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil {
		return nil, err
	}

	pom.broker.updateSubscriptions <- pom

	go withRecover(pom.mainLoop)

	return pom, nil
}

func (pom *partitionOffsetManager) mainLoop() {
	for {
		select {
		case <-pom.rebalance:
			if err := pom.selectBroker(); err != nil {
				pom.handleError(err)
				pom.rebalance <- none{}
			} else {
				pom.broker.updateSubscriptions <- pom
			}
		case <-pom.dying:
			if pom.broker != nil {
				select {
				case <-pom.rebalance:
				case pom.broker.updateSubscriptions <- pom:
				}
				pom.parent.unrefBrokerOffsetManager(pom.broker)
			}
			pom.parent.abandonPartitionOffsetManager(pom)
			close(pom.errors)
			return
		}
	}
}

func (pom *partitionOffsetManager) selectBroker() error {
	if pom.broker != nil {
		pom.parent.unrefBrokerOffsetManager(pom.broker)
		pom.broker = nil
	}

	var broker *Broker
	var err error

	if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil {
		return err
	}

	if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil {
		return err
	}

	pom.broker = pom.parent.refBrokerOffsetManager(broker)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error {
|
||||
request := new(OffsetFetchRequest)
|
||||
request.Version = 1
|
||||
request.ConsumerGroup = pom.parent.group
|
||||
request.AddPartition(pom.topic, pom.partition)
|
||||
|
||||
response, err := pom.broker.broker.FetchOffset(request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
block := response.GetBlock(pom.topic, pom.partition)
|
||||
if block == nil {
|
||||
return ErrIncompleteResponse
|
||||
}
|
||||
|
||||
switch block.Err {
|
||||
case ErrNoError:
|
||||
pom.offset = block.Offset
|
||||
pom.metadata = block.Metadata
|
||||
return nil
|
||||
case ErrNotCoordinatorForConsumer:
|
||||
if retries <= 0 {
|
||||
return block.Err
|
||||
}
|
||||
if err := pom.selectBroker(); err != nil {
|
||||
return err
|
||||
}
|
||||
return pom.fetchInitialOffset(retries - 1)
|
||||
case ErrOffsetsLoadInProgress:
|
||||
if retries <= 0 {
|
||||
return block.Err
|
||||
}
|
||||
time.Sleep(pom.parent.conf.Metadata.Retry.Backoff)
|
||||
return pom.fetchInitialOffset(retries - 1)
|
||||
default:
|
||||
return block.Err
|
||||
}
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) handleError(err error) {
|
||||
cErr := &ConsumerError{
|
||||
Topic: pom.topic,
|
||||
Partition: pom.partition,
|
||||
Err: err,
|
||||
}
|
||||
|
||||
if pom.parent.conf.Consumer.Return.Errors {
|
||||
pom.errors <- cErr
|
||||
} else {
|
||||
Logger.Println(cErr)
|
||||
}
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
|
||||
return pom.errors
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
|
||||
pom.lock.Lock()
|
||||
defer pom.lock.Unlock()
|
||||
|
||||
if offset > pom.offset {
|
||||
pom.offset = offset
|
||||
pom.metadata = metadata
|
||||
pom.dirty = true
|
||||
}
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
|
||||
pom.lock.Lock()
|
||||
defer pom.lock.Unlock()
|
||||
|
||||
if pom.offset == offset && pom.metadata == metadata {
|
||||
pom.dirty = false
|
||||
pom.clean.Signal()
|
||||
}
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) NextOffset() (int64, string) {
|
||||
pom.lock.Lock()
|
||||
defer pom.lock.Unlock()
|
||||
|
||||
if pom.offset >= 0 {
|
||||
return pom.offset, pom.metadata
|
||||
}
|
||||
|
||||
return pom.parent.conf.Consumer.Offsets.Initial, ""
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) AsyncClose() {
|
||||
go func() {
|
||||
pom.lock.Lock()
|
||||
defer pom.lock.Unlock()
|
||||
|
||||
for pom.dirty {
|
||||
pom.clean.Wait()
|
||||
}
|
||||
|
||||
close(pom.dying)
|
||||
}()
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) Close() error {
|
||||
pom.AsyncClose()
|
||||
|
||||
var errors ConsumerErrors
|
||||
for err := range pom.errors {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return errors
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Broker Offset Manager
|
||||
|
||||
type brokerOffsetManager struct {
|
||||
parent *offsetManager
|
||||
broker *Broker
|
||||
timer *time.Ticker
|
||||
updateSubscriptions chan *partitionOffsetManager
|
||||
subscriptions map[*partitionOffsetManager]none
|
||||
refs int
|
||||
}
|
||||
|
||||
func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
|
||||
bom := &brokerOffsetManager{
|
||||
parent: om,
|
||||
broker: broker,
|
||||
timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval),
|
||||
updateSubscriptions: make(chan *partitionOffsetManager),
|
||||
subscriptions: make(map[*partitionOffsetManager]none),
|
||||
}
|
||||
|
||||
go withRecover(bom.mainLoop)
|
||||
|
||||
return bom
|
||||
}
|
||||
|
||||
func (bom *brokerOffsetManager) mainLoop() {
|
||||
for {
|
||||
select {
|
||||
case <-bom.timer.C:
|
||||
if len(bom.subscriptions) > 0 {
|
||||
bom.flushToBroker()
|
||||
}
|
||||
case s, ok := <-bom.updateSubscriptions:
|
||||
if !ok {
|
||||
bom.timer.Stop()
|
||||
return
|
||||
}
|
||||
if _, ok := bom.subscriptions[s]; ok {
|
||||
delete(bom.subscriptions, s)
|
||||
} else {
|
||||
bom.subscriptions[s] = none{}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bom *brokerOffsetManager) flushToBroker() {
|
||||
request := bom.constructRequest()
|
||||
if request == nil {
|
||||
return
|
||||
}
|
||||
|
||||
response, err := bom.broker.CommitOffset(request)
|
||||
|
||||
if err != nil {
|
||||
bom.abort(err)
|
||||
return
|
||||
}
|
||||
|
||||
for s := range bom.subscriptions {
|
||||
if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var err KError
|
||||
var ok bool
|
||||
|
||||
if response.Errors[s.topic] == nil {
|
||||
s.handleError(ErrIncompleteResponse)
|
||||
delete(bom.subscriptions, s)
|
||||
s.rebalance <- none{}
|
||||
continue
|
||||
}
|
||||
if err, ok = response.Errors[s.topic][s.partition]; !ok {
|
||||
s.handleError(ErrIncompleteResponse)
|
||||
delete(bom.subscriptions, s)
|
||||
s.rebalance <- none{}
|
||||
continue
|
||||
}
|
||||
|
||||
switch err {
|
||||
case ErrNoError:
|
||||
block := request.blocks[s.topic][s.partition]
|
||||
s.updateCommitted(block.offset, block.metadata)
|
||||
case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
|
||||
ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
|
||||
// not a critical error, we just need to redispatch
|
||||
delete(bom.subscriptions, s)
|
||||
s.rebalance <- none{}
|
||||
case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
|
||||
// nothing we can do about this, just tell the user and carry on
|
||||
s.handleError(err)
|
||||
case ErrOffsetsLoadInProgress:
|
||||
// nothing wrong but we didn't commit, we'll get it next time round
|
||||
break
|
||||
case ErrUnknownTopicOrPartition:
|
||||
// let the user know *and* try redispatching - if topic-auto-create is
|
||||
// enabled, redispatching should trigger a metadata request and create the
|
||||
// topic; if not then re-dispatching won't help, but we've let the user
|
||||
// know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
|
||||
fallthrough
|
||||
default:
|
||||
// dunno, tell the user and try redispatching
|
||||
s.handleError(err)
|
||||
delete(bom.subscriptions, s)
|
||||
s.rebalance <- none{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest {
|
||||
var r *OffsetCommitRequest
|
||||
var perPartitionTimestamp int64
|
||||
if bom.parent.conf.Consumer.Offsets.Retention == 0 {
|
||||
perPartitionTimestamp = ReceiveTime
|
||||
r = &OffsetCommitRequest{
|
||||
Version: 1,
|
||||
ConsumerGroup: bom.parent.group,
|
||||
ConsumerGroupGeneration: GroupGenerationUndefined,
|
||||
}
|
||||
} else {
|
||||
r = &OffsetCommitRequest{
|
||||
Version: 2,
|
||||
RetentionTime: int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond),
|
||||
ConsumerGroup: bom.parent.group,
|
||||
ConsumerGroupGeneration: GroupGenerationUndefined,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
for s := range bom.subscriptions {
|
||||
s.lock.Lock()
|
||||
if s.dirty {
|
||||
r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata)
|
||||
}
|
||||
s.lock.Unlock()
|
||||
}
|
||||
|
||||
if len(r.blocks) > 0 {
|
||||
return r
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bom *brokerOffsetManager) abort(err error) {
|
||||
_ = bom.broker.Close() // we don't care about the error this might return, we already have one
|
||||
bom.parent.abandonBroker(bom)
|
||||
|
||||
for pom := range bom.subscriptions {
|
||||
pom.handleError(err)
|
||||
pom.rebalance <- none{}
|
||||
}
|
||||
|
||||
for s := range bom.updateSubscriptions {
|
||||
if _, ok := bom.subscriptions[s]; !ok {
|
||||
s.handleError(err)
|
||||
s.rebalance <- none{}
|
||||
}
|
||||
}
|
||||
|
||||
bom.subscriptions = make(map[*partitionOffsetManager]none)
|
||||
}
|
||||
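A minimal usage sketch of the API above (not from this vendored file; the broker address, group and topic names are placeholders):

	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		panic(err)
	}
	defer client.Close()

	om, err := sarama.NewOffsetManagerFromClient("example-group", client)
	if err != nil {
		panic(err)
	}
	pom, err := om.ManagePartition("example-topic", 0)
	if err != nil {
		panic(err)
	}

	offset, _ := pom.NextOffset() // where to resume consuming
	// ... after handling the message at `offset`:
	pom.MarkOffset(offset+1, "") // mark the *next* offset to read, per the contract above

	if err := pom.Close(); err != nil { // close before closing the client
		panic(err)
	}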
132 vendor/github.com/Shopify/sarama/offset_request.go generated vendored Normal file
@@ -0,0 +1,132 @@
package sarama

type offsetRequestBlock struct {
	time       int64
	maxOffsets int32 // Only used in version 0
}

func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
	pe.putInt64(int64(b.time))
	if version == 0 {
		pe.putInt32(b.maxOffsets)
	}

	return nil
}

func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
	if b.time, err = pd.getInt64(); err != nil {
		return err
	}
	if version == 0 {
		if b.maxOffsets, err = pd.getInt32(); err != nil {
			return err
		}
	}
	return nil
}

type OffsetRequest struct {
	Version int16
	blocks  map[string]map[int32]*offsetRequestBlock
}

func (r *OffsetRequest) encode(pe packetEncoder) error {
	pe.putInt32(-1) // replica ID is always -1 for clients
	err := pe.putArrayLength(len(r.blocks))
	if err != nil {
		return err
	}
	for topic, partitions := range r.blocks {
		err = pe.putString(topic)
		if err != nil {
			return err
		}
		err = pe.putArrayLength(len(partitions))
		if err != nil {
			return err
		}
		for partition, block := range partitions {
			pe.putInt32(partition)
			if err = block.encode(pe, r.Version); err != nil {
				return err
			}
		}
	}
	return nil
}

func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
	r.Version = version

	// Ignore replica ID
	if _, err := pd.getInt32(); err != nil {
		return err
	}
	blockCount, err := pd.getArrayLength()
	if err != nil {
		return err
	}
	if blockCount == 0 {
		return nil
	}
	r.blocks = make(map[string]map[int32]*offsetRequestBlock)
	for i := 0; i < blockCount; i++ {
		topic, err := pd.getString()
		if err != nil {
			return err
		}
		partitionCount, err := pd.getArrayLength()
		if err != nil {
			return err
		}
		r.blocks[topic] = make(map[int32]*offsetRequestBlock)
		for j := 0; j < partitionCount; j++ {
			partition, err := pd.getInt32()
			if err != nil {
				return err
			}
			block := &offsetRequestBlock{}
			if err := block.decode(pd, version); err != nil {
				return err
			}
			r.blocks[topic][partition] = block
		}
	}
	return nil
}

func (r *OffsetRequest) key() int16 {
	return 2
}

func (r *OffsetRequest) version() int16 {
	return r.Version
}

func (r *OffsetRequest) requiredVersion() KafkaVersion {
	switch r.Version {
	case 1:
		return V0_10_1_0
	default:
		return minVersion
	}
}

func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
	if r.blocks == nil {
		r.blocks = make(map[string]map[int32]*offsetRequestBlock)
	}

	if r.blocks[topic] == nil {
		r.blocks[topic] = make(map[int32]*offsetRequestBlock)
	}

	tmp := new(offsetRequestBlock)
	tmp.time = time
	if r.Version == 0 {
		tmp.maxOffsets = maxOffsets
	}

	r.blocks[topic][partitionID] = tmp
}
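For context, a sketch of how the AddBlock helper above is typically invoked (OffsetNewest and OffsetOldest are the library's sentinel time values; the topic name is a placeholder):

	req := &sarama.OffsetRequest{} // Version 0, so maxOffsets is honoured
	// Ask for the single newest offset of partition 0; OffsetNewest is -1 on the wire.
	req.AddBlock("example-topic", 0, sarama.OffsetNewest, 1)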
174 vendor/github.com/Shopify/sarama/offset_response.go generated vendored Normal file
@@ -0,0 +1,174 @@
package sarama

type OffsetResponseBlock struct {
	Err       KError
	Offsets   []int64 // Version 0
	Offset    int64   // Version 1
	Timestamp int64   // Version 1
}

func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
	tmp, err := pd.getInt16()
	if err != nil {
		return err
	}
	b.Err = KError(tmp)

	if version == 0 {
		b.Offsets, err = pd.getInt64Array()

		return err
	}

	b.Timestamp, err = pd.getInt64()
	if err != nil {
		return err
	}

	b.Offset, err = pd.getInt64()
	if err != nil {
		return err
	}

	// For backwards compatibility put the offset in the offsets array too
	b.Offsets = []int64{b.Offset}

	return nil
}

func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
	pe.putInt16(int16(b.Err))

	if version == 0 {
		return pe.putInt64Array(b.Offsets)
	}

	pe.putInt64(b.Timestamp)
	pe.putInt64(b.Offset)

	return nil
}

type OffsetResponse struct {
	Version int16
	Blocks  map[string]map[int32]*OffsetResponseBlock
}

func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
	numTopics, err := pd.getArrayLength()
	if err != nil {
		return err
	}

	r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
	for i := 0; i < numTopics; i++ {
		name, err := pd.getString()
		if err != nil {
			return err
		}

		numBlocks, err := pd.getArrayLength()
		if err != nil {
			return err
		}

		r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)

		for j := 0; j < numBlocks; j++ {
			id, err := pd.getInt32()
			if err != nil {
				return err
			}

			block := new(OffsetResponseBlock)
			err = block.decode(pd, version)
			if err != nil {
				return err
			}
			r.Blocks[name][id] = block
		}
	}

	return nil
}

func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
	if r.Blocks == nil {
		return nil
	}

	if r.Blocks[topic] == nil {
		return nil
	}

	return r.Blocks[topic][partition]
}

/*
// [0 0 0 1 ntopics
0 8 109 121 95 116 111 112 105 99 topic
0 0 0 1 npartitions
0 0 0 0 id
0 0

0 0 0 1 0 0 0 0
0 1 1 1 0 0 0 1
0 8 109 121 95 116 111 112
105 99 0 0 0 1 0 0
0 0 0 0 0 0 0 1
0 0 0 0 0 1 1 1] <nil>

*/
func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
	if err = pe.putArrayLength(len(r.Blocks)); err != nil {
		return err
	}

	for topic, partitions := range r.Blocks {
		if err = pe.putString(topic); err != nil {
			return err
		}
		if err = pe.putArrayLength(len(partitions)); err != nil {
			return err
		}
		for partition, block := range partitions {
			pe.putInt32(partition)
			if err = block.encode(pe, r.version()); err != nil {
				return err
			}
		}
	}

	return nil
}

func (r *OffsetResponse) key() int16 {
	return 2
}

func (r *OffsetResponse) version() int16 {
	return r.Version
}

func (r *OffsetResponse) requiredVersion() KafkaVersion {
	switch r.Version {
	case 1:
		return V0_10_1_0
	default:
		return minVersion
	}
}

// testing API

func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
	if r.Blocks == nil {
		r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
	}
	byTopic, ok := r.Blocks[topic]
	if !ok {
		byTopic = make(map[int32]*OffsetResponseBlock)
		r.Blocks[topic] = byTopic
	}
	byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
}
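A sketch of reading the matching response via GetBlock (here `resp` is assumed to come from Broker.GetAvailableOffsets or similar):

	block := resp.GetBlock("example-topic", 0)
	switch {
	case block == nil:
		// topic/partition missing entirely from the response
	case block.Err != sarama.ErrNoError:
		// broker-side error for this partition
	default:
		next := block.Offsets[0] // v0 fills Offsets; v1 also fills Offset/Timestamp
		_ = next
	}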
45 vendor/github.com/Shopify/sarama/packet_decoder.go generated vendored Normal file
@@ -0,0 +1,45 @@
package sarama

// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
// Types implementing Decoder only need to worry about calling methods like GetString,
// not about how a string is represented in Kafka.
type packetDecoder interface {
	// Primitives
	getInt8() (int8, error)
	getInt16() (int16, error)
	getInt32() (int32, error)
	getInt64() (int64, error)
	getArrayLength() (int, error)

	// Collections
	getBytes() ([]byte, error)
	getString() (string, error)
	getInt32Array() ([]int32, error)
	getInt64Array() ([]int64, error)
	getStringArray() ([]string, error)

	// Subsets
	remaining() int
	getSubset(length int) (packetDecoder, error)

	// Stacks, see PushDecoder
	push(in pushDecoder) error
	pop() error
}

// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
// depend upon have been decoded.
type pushDecoder interface {
	// Saves the offset into the input buffer as the location to actually read the calculated value when able.
	saveOffset(in int)

	// Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32).
	reserveLength() int

	// Indicates that all required data is now available to calculate and check the field.
	// SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
	// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
	check(curOffset int, buf []byte) error
}
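To make the push/pop contract concrete, a sketch of a CRC32 pushDecoder (illustrative only; the library ships its own field type, and this assumes hash/crc32 and encoding/binary are imported):

	type crc32Check struct{ startOffset int }

	func (c *crc32Check) saveOffset(in int)  { c.startOffset = in }
	func (c *crc32Check) reserveLength() int { return 4 }

	// check recomputes the checksum over the bytes decoded after the reserved
	// field and compares it with the stored big-endian value.
	func (c *crc32Check) check(curOffset int, buf []byte) error {
		crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
		if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) {
			return PacketDecodingError{"CRC didn't match"}
		}
		return nil
	}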
50 vendor/github.com/Shopify/sarama/packet_encoder.go generated vendored Normal file
@@ -0,0 +1,50 @@
package sarama

import "github.com/rcrowley/go-metrics"

// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
// Types implementing Encoder only need to worry about calling methods like PutString,
// not about how a string is represented in Kafka.
type packetEncoder interface {
	// Primitives
	putInt8(in int8)
	putInt16(in int16)
	putInt32(in int32)
	putInt64(in int64)
	putArrayLength(in int) error

	// Collections
	putBytes(in []byte) error
	putRawBytes(in []byte) error
	putString(in string) error
	putStringArray(in []string) error
	putInt32Array(in []int32) error
	putInt64Array(in []int64) error

	// Provide the current offset to record the batch size metric
	offset() int

	// Stacks, see PushEncoder
	push(in pushEncoder)
	pop() error

	// To record metrics when provided
	metricRegistry() metrics.Registry
}

// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
// depend upon have been written.
type pushEncoder interface {
	// Saves the offset into the input buffer as the location to actually write the calculated value when able.
	saveOffset(in int)

	// Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32).
	reserveLength() int

	// Indicates that all required data is now available to calculate and write the field.
	// SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
	// of data to the saved offset, based on the data between the saved offset and curOffset.
	run(curOffset int, buf []byte) error
}
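Symmetrically, a sketch of a length-prefix pushEncoder (the library has its own lengthField, which produce_request.go below pushes around each message set; this illustrative version assumes encoding/binary):

	type lengthPrefix struct{ startOffset int }

	func (l *lengthPrefix) saveOffset(in int)  { l.startOffset = in }
	func (l *lengthPrefix) reserveLength() int { return 4 }

	// run back-fills the reserved 4 bytes with the byte count written after them.
	func (l *lengthPrefix) run(curOffset int, buf []byte) error {
		binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
		return nil
	}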
123 vendor/github.com/Shopify/sarama/partitioner.go generated vendored Normal file
@@ -0,0 +1,123 @@
package sarama

import (
	"hash"
	"hash/fnv"
	"math/rand"
	"time"
)

// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
// as simple default implementations.
type Partitioner interface {
	// Partition takes a message and partition count and chooses a partition
	Partition(message *ProducerMessage, numPartitions int32) (int32, error)

	// RequiresConsistency indicates to the user of the partitioner whether the
	// mapping of key->partition is consistent or not. Specifically, if a
	// partitioner requires consistency then it must be allowed to choose from all
	// partitions (even ones known to be unavailable), and its choice must be
	// respected by the caller. The obvious example is the HashPartitioner.
	RequiresConsistency() bool
}

// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
type PartitionerConstructor func(topic string) Partitioner

type manualPartitioner struct{}

// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
// ProducerMessage's Partition field as the partition to produce to.
func NewManualPartitioner(topic string) Partitioner {
	return new(manualPartitioner)
}

func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
	return message.Partition, nil
}

func (p *manualPartitioner) RequiresConsistency() bool {
	return true
}

type randomPartitioner struct {
	generator *rand.Rand
}

// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
func NewRandomPartitioner(topic string) Partitioner {
	p := new(randomPartitioner)
	p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
	return p
}

func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
	return int32(p.generator.Intn(int(numPartitions))), nil
}

func (p *randomPartitioner) RequiresConsistency() bool {
	return false
}

type roundRobinPartitioner struct {
	partition int32
}

// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
func NewRoundRobinPartitioner(topic string) Partitioner {
	return &roundRobinPartitioner{}
}

func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
	if p.partition >= numPartitions {
		p.partition = 0
	}
	ret := p.partition
	p.partition++
	return ret, nil
}

func (p *roundRobinPartitioner) RequiresConsistency() bool {
	return false
}

type hashPartitioner struct {
	random Partitioner
	hasher hash.Hash32
}

// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
// modulo the number of partitions. This ensures that messages with the same key always end up on the
// same partition.
func NewHashPartitioner(topic string) Partitioner {
	p := new(hashPartitioner)
	p.random = NewRandomPartitioner(topic)
	p.hasher = fnv.New32a()
	return p
}

func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
	if message.Key == nil {
		return p.random.Partition(message, numPartitions)
	}
	bytes, err := message.Key.Encode()
	if err != nil {
		return -1, err
	}
	p.hasher.Reset()
	_, err = p.hasher.Write(bytes)
	if err != nil {
		return -1, err
	}
	partition := int32(p.hasher.Sum32()) % numPartitions
	if partition < 0 {
		partition = -partition
	}
	return partition, nil
}

func (p *hashPartitioner) RequiresConsistency() bool {
	return true
}
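Selecting one of these partitioners happens on the producer config; a brief sketch:

	config := sarama.NewConfig()
	// Keyed messages always land on the same partition (FNV-1a, as above).
	config.Producer.Partitioner = sarama.NewHashPartitioner
	// Alternatively, route by hand via ProducerMessage.Partition:
	// config.Producer.Partitioner = sarama.NewManualPartitioner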
121 vendor/github.com/Shopify/sarama/prep_encoder.go generated vendored Normal file
@@ -0,0 +1,121 @@
package sarama

import (
	"fmt"
	"math"

	"github.com/rcrowley/go-metrics"
)

type prepEncoder struct {
	length int
}

// primitives

func (pe *prepEncoder) putInt8(in int8) {
	pe.length++
}

func (pe *prepEncoder) putInt16(in int16) {
	pe.length += 2
}

func (pe *prepEncoder) putInt32(in int32) {
	pe.length += 4
}

func (pe *prepEncoder) putInt64(in int64) {
	pe.length += 8
}

func (pe *prepEncoder) putArrayLength(in int) error {
	if in > math.MaxInt32 {
		return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
	}
	pe.length += 4
	return nil
}

// arrays

func (pe *prepEncoder) putBytes(in []byte) error {
	pe.length += 4
	if in == nil {
		return nil
	}
	if len(in) > math.MaxInt32 {
		return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
	}
	pe.length += len(in)
	return nil
}

func (pe *prepEncoder) putRawBytes(in []byte) error {
	if len(in) > math.MaxInt32 {
		return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
	}
	pe.length += len(in)
	return nil
}

func (pe *prepEncoder) putString(in string) error {
	pe.length += 2
	if len(in) > math.MaxInt16 {
		return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
	}
	pe.length += len(in)
	return nil
}

func (pe *prepEncoder) putStringArray(in []string) error {
	err := pe.putArrayLength(len(in))
	if err != nil {
		return err
	}

	for _, str := range in {
		if err := pe.putString(str); err != nil {
			return err
		}
	}

	return nil
}

func (pe *prepEncoder) putInt32Array(in []int32) error {
	err := pe.putArrayLength(len(in))
	if err != nil {
		return err
	}
	pe.length += 4 * len(in)
	return nil
}

func (pe *prepEncoder) putInt64Array(in []int64) error {
	err := pe.putArrayLength(len(in))
	if err != nil {
		return err
	}
	pe.length += 8 * len(in)
	return nil
}

func (pe *prepEncoder) offset() int {
	return pe.length
}

// stackable

func (pe *prepEncoder) push(in pushEncoder) {
	pe.length += in.reserveLength()
}

func (pe *prepEncoder) pop() error {
	return nil
}

// we do not record metrics during the prep encoder pass
func (pe *prepEncoder) metricRegistry() metrics.Registry {
	return nil
}
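prepEncoder is the sizing half of a two-pass encode: one pass over the message computes the exact wire length, then a realEncoder (later in this diff) writes into a buffer of that size. A sketch of how the passes plausibly fit together, assuming the package's `encoder` interface exposes encode(packetEncoder) error:

	func encodeSketch(e encoder) ([]byte, error) {
		var prep prepEncoder
		if err := e.encode(&prep); err != nil {
			return nil, err // the sizing pass already rejects over-long fields
		}

		realEnc := realEncoder{raw: make([]byte, prep.length)} // exact-size buffer
		if err := e.encode(&realEnc); err != nil {
			return nil, err
		}
		return realEnc.raw, nil
	}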
209 vendor/github.com/Shopify/sarama/produce_request.go generated vendored Normal file
@@ -0,0 +1,209 @@
package sarama

import "github.com/rcrowley/go-metrics"

// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
// it must see before responding. Any of the constants defined here are valid. On broker versions
// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
// by setting the `min.isr` value in the brokers configuration).
type RequiredAcks int16

const (
	// NoResponse doesn't send any response, the TCP ACK is all you get.
	NoResponse RequiredAcks = 0
	// WaitForLocal waits for only the local commit to succeed before responding.
	WaitForLocal RequiredAcks = 1
	// WaitForAll waits for all in-sync replicas to commit before responding.
	// The minimum number of in-sync replicas is configured on the broker via
	// the `min.insync.replicas` configuration key.
	WaitForAll RequiredAcks = -1
)

type ProduceRequest struct {
	RequiredAcks RequiredAcks
	Timeout      int32
	Version      int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10
	msgSets      map[string]map[int32]*MessageSet
}

func (r *ProduceRequest) encode(pe packetEncoder) error {
	pe.putInt16(int16(r.RequiredAcks))
	pe.putInt32(r.Timeout)
	err := pe.putArrayLength(len(r.msgSets))
	if err != nil {
		return err
	}
	metricRegistry := pe.metricRegistry()
	var batchSizeMetric metrics.Histogram
	var compressionRatioMetric metrics.Histogram
	if metricRegistry != nil {
		batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
		compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
	}

	totalRecordCount := int64(0)
	for topic, partitions := range r.msgSets {
		err = pe.putString(topic)
		if err != nil {
			return err
		}
		err = pe.putArrayLength(len(partitions))
		if err != nil {
			return err
		}
		topicRecordCount := int64(0)
		var topicCompressionRatioMetric metrics.Histogram
		if metricRegistry != nil {
			topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
		}
		for id, msgSet := range partitions {
			startOffset := pe.offset()
			pe.putInt32(id)
			pe.push(&lengthField{})
			err = msgSet.encode(pe)
			if err != nil {
				return err
			}
			err = pe.pop()
			if err != nil {
				return err
			}
			if metricRegistry != nil {
				for _, messageBlock := range msgSet.Messages {
					// Is this a fake "message" wrapping real messages?
					if messageBlock.Msg.Set != nil {
						topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
					} else {
						// A single uncompressed message
						topicRecordCount++
					}
					// Better safe than sorry when computing the compression ratio
					if messageBlock.Msg.compressedSize != 0 {
						compressionRatio := float64(len(messageBlock.Msg.Value)) /
							float64(messageBlock.Msg.compressedSize)
						// Histograms do not support decimal values, so multiply by 100 for better precision
						intCompressionRatio := int64(100 * compressionRatio)
						compressionRatioMetric.Update(intCompressionRatio)
						topicCompressionRatioMetric.Update(intCompressionRatio)
					}
				}
				batchSize := int64(pe.offset() - startOffset)
				batchSizeMetric.Update(batchSize)
				getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
			}
		}
		if topicRecordCount > 0 {
			getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
			getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
			totalRecordCount += topicRecordCount
		}
	}
	if totalRecordCount > 0 {
		metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
		getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
	}

	return nil
}

func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
	requiredAcks, err := pd.getInt16()
	if err != nil {
		return err
	}
	r.RequiredAcks = RequiredAcks(requiredAcks)
	if r.Timeout, err = pd.getInt32(); err != nil {
		return err
	}
	topicCount, err := pd.getArrayLength()
	if err != nil {
		return err
	}
	if topicCount == 0 {
		return nil
	}
	r.msgSets = make(map[string]map[int32]*MessageSet)
	for i := 0; i < topicCount; i++ {
		topic, err := pd.getString()
		if err != nil {
			return err
		}
		partitionCount, err := pd.getArrayLength()
		if err != nil {
			return err
		}
		r.msgSets[topic] = make(map[int32]*MessageSet)
		for j := 0; j < partitionCount; j++ {
			partition, err := pd.getInt32()
			if err != nil {
				return err
			}
			messageSetSize, err := pd.getInt32()
			if err != nil {
				return err
			}
			msgSetDecoder, err := pd.getSubset(int(messageSetSize))
			if err != nil {
				return err
			}
			msgSet := &MessageSet{}
			err = msgSet.decode(msgSetDecoder)
			if err != nil {
				return err
			}
			r.msgSets[topic][partition] = msgSet
		}
	}
	return nil
}

func (r *ProduceRequest) key() int16 {
	return 0
}

func (r *ProduceRequest) version() int16 {
	return r.Version
}

func (r *ProduceRequest) requiredVersion() KafkaVersion {
	switch r.Version {
	case 1:
		return V0_9_0_0
	case 2:
		return V0_10_0_0
	default:
		return minVersion
	}
}

func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
	if r.msgSets == nil {
		r.msgSets = make(map[string]map[int32]*MessageSet)
	}

	if r.msgSets[topic] == nil {
		r.msgSets[topic] = make(map[int32]*MessageSet)
	}

	set := r.msgSets[topic][partition]

	if set == nil {
		set = new(MessageSet)
		r.msgSets[topic][partition] = set
	}

	set.addMessage(msg)
}

func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
	if r.msgSets == nil {
		r.msgSets = make(map[string]map[int32]*MessageSet)
	}

	if r.msgSets[topic] == nil {
		r.msgSets[topic] = make(map[int32]*MessageSet)
	}

	r.msgSets[topic][partition] = set
}
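The acknowledgement level above maps directly onto the producer config; for example:

	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll // wait for min.insync.replicas copies
	config.Producer.Timeout = 10 * time.Second       // becomes ProduceRequest.Timeout, in ms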
158 vendor/github.com/Shopify/sarama/produce_response.go generated vendored Normal file
@@ -0,0 +1,158 @@
package sarama

import "time"

type ProduceResponseBlock struct {
	Err    KError
	Offset int64
	// only provided if Version >= 2 and the broker is configured with `LogAppendTime`
	Timestamp time.Time
}

func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
	tmp, err := pd.getInt16()
	if err != nil {
		return err
	}
	b.Err = KError(tmp)

	b.Offset, err = pd.getInt64()
	if err != nil {
		return err
	}

	if version >= 2 {
		if millis, err := pd.getInt64(); err != nil {
			return err
		} else if millis != -1 {
			b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
		}
	}

	return nil
}

type ProduceResponse struct {
	Blocks       map[string]map[int32]*ProduceResponseBlock
	Version      int16
	ThrottleTime time.Duration // only provided if Version >= 1
}

func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
	r.Version = version

	numTopics, err := pd.getArrayLength()
	if err != nil {
		return err
	}

	r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
	for i := 0; i < numTopics; i++ {
		name, err := pd.getString()
		if err != nil {
			return err
		}

		numBlocks, err := pd.getArrayLength()
		if err != nil {
			return err
		}

		r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)

		for j := 0; j < numBlocks; j++ {
			id, err := pd.getInt32()
			if err != nil {
				return err
			}

			block := new(ProduceResponseBlock)
			err = block.decode(pd, version)
			if err != nil {
				return err
			}
			r.Blocks[name][id] = block
		}
	}

	if r.Version >= 1 {
		millis, err := pd.getInt32()
		if err != nil {
			return err
		}

		r.ThrottleTime = time.Duration(millis) * time.Millisecond
	}

	return nil
}

func (r *ProduceResponse) encode(pe packetEncoder) error {
	err := pe.putArrayLength(len(r.Blocks))
	if err != nil {
		return err
	}
	for topic, partitions := range r.Blocks {
		err = pe.putString(topic)
		if err != nil {
			return err
		}
		err = pe.putArrayLength(len(partitions))
		if err != nil {
			return err
		}
		for id, prb := range partitions {
			pe.putInt32(id)
			pe.putInt16(int16(prb.Err))
			pe.putInt64(prb.Offset)
		}
	}
	if r.Version >= 1 {
		pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
	}
	return nil
}

func (r *ProduceResponse) key() int16 {
	return 0
}

func (r *ProduceResponse) version() int16 {
	return r.Version
}

func (r *ProduceResponse) requiredVersion() KafkaVersion {
	switch r.Version {
	case 1:
		return V0_9_0_0
	case 2:
		return V0_10_0_0
	default:
		return minVersion
	}
}

func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
	if r.Blocks == nil {
		return nil
	}

	if r.Blocks[topic] == nil {
		return nil
	}

	return r.Blocks[topic][partition]
}

// Testing API

func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
	if r.Blocks == nil {
		r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
	}
	byTopic, ok := r.Blocks[topic]
	if !ok {
		byTopic = make(map[int32]*ProduceResponseBlock)
		r.Blocks[topic] = byTopic
	}
	byTopic[partition] = &ProduceResponseBlock{Err: err}
}
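A sketch of inspecting one partition's outcome in such a response (`resp` assumed):

	if block := resp.GetBlock("example-topic", 0); block != nil {
		if block.Err != sarama.ErrNoError {
			// e.g. ErrNotLeaderForPartition is retriable after a metadata refresh
		} else {
			_ = block.Offset // offset assigned to the first message in the set
		}
	}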
176 vendor/github.com/Shopify/sarama/produce_set.go generated vendored Normal file
@@ -0,0 +1,176 @@
package sarama

import "time"

type partitionSet struct {
	msgs        []*ProducerMessage
	setToSend   *MessageSet
	bufferBytes int
}

type produceSet struct {
	parent *asyncProducer
	msgs   map[string]map[int32]*partitionSet

	bufferBytes int
	bufferCount int
}

func newProduceSet(parent *asyncProducer) *produceSet {
	return &produceSet{
		msgs:   make(map[string]map[int32]*partitionSet),
		parent: parent,
	}
}

func (ps *produceSet) add(msg *ProducerMessage) error {
	var err error
	var key, val []byte

	if msg.Key != nil {
		if key, err = msg.Key.Encode(); err != nil {
			return err
		}
	}

	if msg.Value != nil {
		if val, err = msg.Value.Encode(); err != nil {
			return err
		}
	}

	partitions := ps.msgs[msg.Topic]
	if partitions == nil {
		partitions = make(map[int32]*partitionSet)
		ps.msgs[msg.Topic] = partitions
	}

	set := partitions[msg.Partition]
	if set == nil {
		set = &partitionSet{setToSend: new(MessageSet)}
		partitions[msg.Partition] = set
	}

	set.msgs = append(set.msgs, msg)
	msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
	if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
		if msg.Timestamp.IsZero() {
			msgToSend.Timestamp = time.Now()
		} else {
			msgToSend.Timestamp = msg.Timestamp
		}
		msgToSend.Version = 1
	}
	set.setToSend.addMessage(msgToSend)

	size := producerMessageOverhead + len(key) + len(val)
	set.bufferBytes += size
	ps.bufferBytes += size
	ps.bufferCount++

	return nil
}

func (ps *produceSet) buildRequest() *ProduceRequest {
	req := &ProduceRequest{
		RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
		Timeout:      int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
	}
	if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
		req.Version = 2
	}

	for topic, partitionSet := range ps.msgs {
		for partition, set := range partitionSet {
			if ps.parent.conf.Producer.Compression == CompressionNone {
				req.AddSet(topic, partition, set.setToSend)
			} else {
				// When compression is enabled, the entire set for each partition is compressed
				// and sent as the payload of a single fake "message" with the appropriate codec
				// set and no key. When the server sees a message with a compression codec, it
				// decompresses the payload and treats the result as its message set.
				payload, err := encode(set.setToSend, ps.parent.conf.MetricRegistry)
				if err != nil {
					Logger.Println(err) // if this happens, it's basically our fault.
					panic(err)
				}
				compMsg := &Message{
					Codec: ps.parent.conf.Producer.Compression,
					Key:   nil,
					Value: payload,
					Set:   set.setToSend, // Provide the underlying message set for accurate metrics
				}
				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
					compMsg.Version = 1
					compMsg.Timestamp = set.setToSend.Messages[0].Msg.Timestamp
				}
				req.AddMessage(topic, partition, compMsg)
			}
		}
	}

	return req
}

func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) {
	for topic, partitionSet := range ps.msgs {
		for partition, set := range partitionSet {
			cb(topic, partition, set.msgs)
		}
	}
}

func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
	if ps.msgs[topic] == nil {
		return nil
	}
	set := ps.msgs[topic][partition]
	if set == nil {
		return nil
	}
	ps.bufferBytes -= set.bufferBytes
	ps.bufferCount -= len(set.msgs)
	delete(ps.msgs[topic], partition)
	return set.msgs
}

func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
	switch {
	// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
	case ps.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)):
		return true
	// Would we overflow the size-limit of a compressed message-batch for this partition?
	case ps.parent.conf.Producer.Compression != CompressionNone &&
		ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
		ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize() >= ps.parent.conf.Producer.MaxMessageBytes:
		return true
	// Would we overflow simply in number of messages?
	case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
		return true
	default:
		return false
	}
}

func (ps *produceSet) readyToFlush() bool {
	switch {
	// If we don't have any messages, nothing else matters
	case ps.empty():
		return false
	// If all three config values are 0, we always flush as-fast-as-possible
	case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
		return true
	// If we've passed the message trigger-point
	case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
		return true
	// If we've passed the byte trigger-point
	case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
		return true
	default:
		return false
	}
}

func (ps *produceSet) empty() bool {
	return ps.bufferCount == 0
}
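The overflow and flush predicates above are all driven by producer config; a sketch of the knobs involved:

	config := sarama.NewConfig()
	config.Producer.Flush.Messages = 100                     // flush once 100 messages are buffered...
	config.Producer.Flush.Bytes = 64 * 1024                  // ...or 64 KiB are buffered...
	config.Producer.Flush.Frequency = 500 * time.Millisecond // ...or every 500ms, whichever comes first
	config.Producer.Compression = sarama.CompressionSnappy   // per-partition sets get wrapped, as above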
259 vendor/github.com/Shopify/sarama/real_decoder.go generated vendored Normal file
@@ -0,0 +1,259 @@
package sarama

import (
	"encoding/binary"
	"math"
)

var errInvalidArrayLength = PacketDecodingError{"invalid array length"}
var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
var errInvalidStringLength = PacketDecodingError{"invalid string length"}
var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"}

type realDecoder struct {
	raw   []byte
	off   int
	stack []pushDecoder
}

// primitives

func (rd *realDecoder) getInt8() (int8, error) {
	if rd.remaining() < 1 {
		rd.off = len(rd.raw)
		return -1, ErrInsufficientData
	}
	tmp := int8(rd.raw[rd.off])
	rd.off++
	return tmp, nil
}

func (rd *realDecoder) getInt16() (int16, error) {
	if rd.remaining() < 2 {
		rd.off = len(rd.raw)
		return -1, ErrInsufficientData
	}
	tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
	rd.off += 2
	return tmp, nil
}

func (rd *realDecoder) getInt32() (int32, error) {
	if rd.remaining() < 4 {
		rd.off = len(rd.raw)
		return -1, ErrInsufficientData
	}
	tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
	rd.off += 4
	return tmp, nil
}

func (rd *realDecoder) getInt64() (int64, error) {
	if rd.remaining() < 8 {
		rd.off = len(rd.raw)
		return -1, ErrInsufficientData
	}
	tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
	rd.off += 8
	return tmp, nil
}

func (rd *realDecoder) getArrayLength() (int, error) {
	if rd.remaining() < 4 {
		rd.off = len(rd.raw)
		return -1, ErrInsufficientData
	}
	tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
	rd.off += 4
	if tmp > rd.remaining() {
		rd.off = len(rd.raw)
		return -1, ErrInsufficientData
	} else if tmp > 2*math.MaxUint16 {
		return -1, errInvalidArrayLength
	}
	return tmp, nil
}

// collections

func (rd *realDecoder) getBytes() ([]byte, error) {
	tmp, err := rd.getInt32()

	if err != nil {
		return nil, err
	}

	n := int(tmp)

	switch {
	case n < -1:
		return nil, errInvalidByteSliceLength
	case n == -1:
		return nil, nil
	case n == 0:
		return make([]byte, 0), nil
	case n > rd.remaining():
		rd.off = len(rd.raw)
		return nil, ErrInsufficientData
	}

	tmpStr := rd.raw[rd.off : rd.off+n]
	rd.off += n
	return tmpStr, nil
}

func (rd *realDecoder) getString() (string, error) {
	tmp, err := rd.getInt16()

	if err != nil {
		return "", err
	}

	n := int(tmp)

	switch {
	case n < -1:
		return "", errInvalidStringLength
	case n == -1:
		return "", nil
	case n == 0:
		return "", nil
	case n > rd.remaining():
		rd.off = len(rd.raw)
		return "", ErrInsufficientData
	}

	tmpStr := string(rd.raw[rd.off : rd.off+n])
	rd.off += n
	return tmpStr, nil
}

func (rd *realDecoder) getInt32Array() ([]int32, error) {
	if rd.remaining() < 4 {
		rd.off = len(rd.raw)
		return nil, ErrInsufficientData
	}
	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
	rd.off += 4

	if rd.remaining() < 4*n {
		rd.off = len(rd.raw)
		return nil, ErrInsufficientData
	}

	if n == 0 {
		return nil, nil
	}

	if n < 0 {
		return nil, errInvalidArrayLength
	}

	ret := make([]int32, n)
	for i := range ret {
		ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
		rd.off += 4
	}
	return ret, nil
}

func (rd *realDecoder) getInt64Array() ([]int64, error) {
	if rd.remaining() < 4 {
		rd.off = len(rd.raw)
		return nil, ErrInsufficientData
	}
	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
	rd.off += 4

	if rd.remaining() < 8*n {
		rd.off = len(rd.raw)
		return nil, ErrInsufficientData
	}

	if n == 0 {
		return nil, nil
	}

	if n < 0 {
		return nil, errInvalidArrayLength
	}

	ret := make([]int64, n)
	for i := range ret {
		ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
		rd.off += 8
	}
	return ret, nil
}

func (rd *realDecoder) getStringArray() ([]string, error) {
	if rd.remaining() < 4 {
		rd.off = len(rd.raw)
		return nil, ErrInsufficientData
	}
	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
	rd.off += 4

	if n == 0 {
		return nil, nil
	}

	if n < 0 {
		return nil, errInvalidArrayLength
	}

	ret := make([]string, n)
	for i := range ret {
		if str, err := rd.getString(); err != nil {
			return nil, err
		} else {
			ret[i] = str
		}
	}
	return ret, nil
}

// subsets

func (rd *realDecoder) remaining() int {
	return len(rd.raw) - rd.off
}

func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
	if length < 0 {
		return nil, errInvalidSubsetSize
	} else if length > rd.remaining() {
		rd.off = len(rd.raw)
		return nil, ErrInsufficientData
	}

	start := rd.off
	rd.off += length
	return &realDecoder{raw: rd.raw[start:rd.off]}, nil
}

// stacks

func (rd *realDecoder) push(in pushDecoder) error {
	in.saveOffset(rd.off)

	reserve := in.reserveLength()
	if rd.remaining() < reserve {
		rd.off = len(rd.raw)
		return ErrInsufficientData
	}

	rd.stack = append(rd.stack, in)

	rd.off += reserve

	return nil
}

func (rd *realDecoder) pop() error {
	// this is go's ugly pop pattern (the inverse of append)
	in := rd.stack[len(rd.stack)-1]
	rd.stack = rd.stack[:len(rd.stack)-1]

	return in.check(rd.off, rd.raw)
}
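A toy walk-through of the decoder above, in package scope (a Kafka string is a big-endian int16 length followed by that many bytes):

	rd := &realDecoder{raw: []byte{0x00, 0x05, 'h', 'e', 'l', 'l', 'o'}}
	s, err := rd.getString() // s == "hello", err == nil, rd.remaining() == 0
	_, _ = s, err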
129 vendor/github.com/Shopify/sarama/real_encoder.go generated vendored Normal file
@@ -0,0 +1,129 @@
package sarama

import (
	"encoding/binary"

	"github.com/rcrowley/go-metrics"
)

type realEncoder struct {
	raw      []byte
	off      int
	stack    []pushEncoder
	registry metrics.Registry
}

// primitives

func (re *realEncoder) putInt8(in int8) {
	re.raw[re.off] = byte(in)
	re.off++
}

func (re *realEncoder) putInt16(in int16) {
	binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
	re.off += 2
}

func (re *realEncoder) putInt32(in int32) {
	binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
	re.off += 4
}

func (re *realEncoder) putInt64(in int64) {
	binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
	re.off += 8
}

func (re *realEncoder) putArrayLength(in int) error {
	re.putInt32(int32(in))
	return nil
}

// collection

func (re *realEncoder) putRawBytes(in []byte) error {
	copy(re.raw[re.off:], in)
	re.off += len(in)
	return nil
}

func (re *realEncoder) putBytes(in []byte) error {
	if in == nil {
		re.putInt32(-1)
		return nil
	}
	re.putInt32(int32(len(in)))
	copy(re.raw[re.off:], in)
	re.off += len(in)
	return nil
}

func (re *realEncoder) putString(in string) error {
	re.putInt16(int16(len(in)))
	copy(re.raw[re.off:], in)
	re.off += len(in)
	return nil
}

func (re *realEncoder) putStringArray(in []string) error {
	err := re.putArrayLength(len(in))
	if err != nil {
		return err
	}

	for _, val := range in {
		if err := re.putString(val); err != nil {
			return err
		}
	}

	return nil
}

func (re *realEncoder) putInt32Array(in []int32) error {
	err := re.putArrayLength(len(in))
	if err != nil {
		return err
	}
	for _, val := range in {
		re.putInt32(val)
	}
	return nil
}

func (re *realEncoder) putInt64Array(in []int64) error {
	err := re.putArrayLength(len(in))
	if err != nil {
		return err
	}
	for _, val := range in {
		re.putInt64(val)
	}
	return nil
}

func (re *realEncoder) offset() int {
	return re.off
}

// stacks

func (re *realEncoder) push(in pushEncoder) {
	in.saveOffset(re.off)
	re.off += in.reserveLength()
	re.stack = append(re.stack, in)
}

func (re *realEncoder) pop() error {
	// this is go's ugly pop pattern (the inverse of append)
	in := re.stack[len(re.stack)-1]
	re.stack = re.stack[:len(re.stack)-1]

	return in.run(re.off, re.raw)
}

// we do record metrics during the real encoder pass
func (re *realEncoder) metricRegistry() metrics.Registry {
	return re.registry
}
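Note that realEncoder writes into re.raw at re.off with no bounds growth; it appears to rely on the buffer having been sized by a separate measuring pass beforehand (that pass is not part of this hunk). A sketch of the int16-length-prefixed string layout that putString produces, assuming a correctly sized buffer:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	s := "kafka"
	// int16 length prefix followed by the raw bytes, matching putString.
	buf := make([]byte, 2+len(s))
	binary.BigEndian.PutUint16(buf[0:], uint16(len(s)))
	copy(buf[2:], s)
	fmt.Printf("% x\n", buf) // 00 05 6b 61 66 6b 61
}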
vendor/github.com/Shopify/sarama/request.go (generated, vendored, new file, 119 lines)
@@ -0,0 +1,119 @@
package sarama

import (
	"encoding/binary"
	"fmt"
	"io"
)

type protocolBody interface {
	encoder
	versionedDecoder
	key() int16
	version() int16
	requiredVersion() KafkaVersion
}

type request struct {
	correlationID int32
	clientID      string
	body          protocolBody
}

func (r *request) encode(pe packetEncoder) (err error) {
	pe.push(&lengthField{})
	pe.putInt16(r.body.key())
	pe.putInt16(r.body.version())
	pe.putInt32(r.correlationID)
	err = pe.putString(r.clientID)
	if err != nil {
		return err
	}
	err = r.body.encode(pe)
	if err != nil {
		return err
	}
	return pe.pop()
}

func (r *request) decode(pd packetDecoder) (err error) {
	var key int16
	if key, err = pd.getInt16(); err != nil {
		return err
	}
	var version int16
	if version, err = pd.getInt16(); err != nil {
		return err
	}
	if r.correlationID, err = pd.getInt32(); err != nil {
		return err
	}
	// check the error here instead of silently dropping it and decoding
	// the body from a corrupt offset
	if r.clientID, err = pd.getString(); err != nil {
		return err
	}

	r.body = allocateBody(key, version)
	if r.body == nil {
		return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
	}
	return r.body.decode(pd, version)
}

func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) {
	lengthBytes := make([]byte, 4)
	if _, err := io.ReadFull(r, lengthBytes); err != nil {
		return nil, bytesRead, err
	}
	bytesRead += len(lengthBytes)

	length := int32(binary.BigEndian.Uint32(lengthBytes))
	if length <= 4 || length > MaxRequestSize {
		return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
	}

	encodedReq := make([]byte, length)
	if _, err := io.ReadFull(r, encodedReq); err != nil {
		return nil, bytesRead, err
	}
	bytesRead += len(encodedReq)

	req = &request{}
	if err := decode(encodedReq, req); err != nil {
		return nil, bytesRead, err
	}
	return req, bytesRead, nil
}

func allocateBody(key, version int16) protocolBody {
	switch key {
	case 0:
		return &ProduceRequest{}
	case 1:
		return &FetchRequest{}
	case 2:
		return &OffsetRequest{Version: version}
	case 3:
		return &MetadataRequest{}
	case 8:
		return &OffsetCommitRequest{Version: version}
	case 9:
		return &OffsetFetchRequest{}
	case 10:
		return &ConsumerMetadataRequest{}
	case 11:
		return &JoinGroupRequest{}
	case 12:
		return &HeartbeatRequest{}
	case 13:
		return &LeaveGroupRequest{}
	case 14:
		return &SyncGroupRequest{}
	case 15:
		return &DescribeGroupsRequest{}
	case 16:
		return &ListGroupsRequest{}
	case 17:
		return &SaslHandshakeRequest{}
	case 18:
		return &ApiVersionsRequest{}
	}
	return nil
}
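A request on the wire is therefore a 4-byte length prefix, then key, version, correlation ID, and client ID, then the body. A sketch of the framing that encode and decodeRequest agree on (key 17 is SaslHandshake per allocateBody; the other values are made up for illustration):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	payload := new(bytes.Buffer)
	binary.Write(payload, binary.BigEndian, int16(17)) // key (SaslHandshake)
	binary.Write(payload, binary.BigEndian, int16(0))  // version
	binary.Write(payload, binary.BigEndian, int32(42)) // correlation ID
	binary.Write(payload, binary.BigEndian, int16(4))  // client ID length
	payload.WriteString("demo")                        // client ID
	// body bytes would follow here

	var b bytes.Buffer
	binary.Write(&b, binary.BigEndian, int32(payload.Len())) // length prefix
	b.Write(payload.Bytes())
	fmt.Println(b.Len(), "bytes framed") // 18 bytes framed
}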
vendor/github.com/Shopify/sarama/response_header.go (generated, vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
package sarama

import "fmt"

type responseHeader struct {
	length        int32
	correlationID int32
}

func (r *responseHeader) decode(pd packetDecoder) (err error) {
	r.length, err = pd.getInt32()
	if err != nil {
		return err
	}
	if r.length <= 4 || r.length > MaxResponseSize {
		return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
	}

	r.correlationID, err = pd.getInt32()
	return err
}
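The response header is just two big-endian int32s: the length of the rest of the response and the correlation ID that matches it back to a request. A sketch with hypothetical bytes:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	hdr := make([]byte, 8)
	binary.BigEndian.PutUint32(hdr[0:], 10) // length of the remaining response
	binary.BigEndian.PutUint32(hdr[4:], 42) // correlation ID

	length := int32(binary.BigEndian.Uint32(hdr[0:]))
	corrID := int32(binary.BigEndian.Uint32(hdr[4:]))
	fmt.Println(length, corrID) // 10 42
}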
vendor/github.com/Shopify/sarama/sarama.go (generated, vendored, new file, 99 lines)
@@ -0,0 +1,99 @@
/*
Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.

To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
SyncProducer can still sometimes be lost.

To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic
consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the
https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9
and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.

For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
and message sent on the wire; the Client provides higher-level metadata management that is shared between
the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
exactly with the protocol fields documented by Kafka at
https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol

Metrics are exposed through the https://github.com/rcrowley/go-metrics library in a local registry.

Broker related metrics:

	+----------------------------------------------+------------+---------------------------------------------------------------+
	| Name                                         | Type       | Description                                                   |
	+----------------------------------------------+------------+---------------------------------------------------------------+
	| incoming-byte-rate                           | meter      | Bytes/second read off all brokers                             |
	| incoming-byte-rate-for-broker-<broker-id>    | meter      | Bytes/second read off a given broker                          |
	| outgoing-byte-rate                           | meter      | Bytes/second written off all brokers                          |
	| outgoing-byte-rate-for-broker-<broker-id>    | meter      | Bytes/second written off a given broker                       |
	| request-rate                                 | meter      | Requests/second sent to all brokers                           |
	| request-rate-for-broker-<broker-id>          | meter      | Requests/second sent to a given broker                        |
	| request-size                                 | histogram  | Distribution of the request size in bytes for all brokers     |
	| request-size-for-broker-<broker-id>          | histogram  | Distribution of the request size in bytes for a given broker  |
	| request-latency-in-ms                        | histogram  | Distribution of the request latency in ms for all brokers     |
	| request-latency-in-ms-for-broker-<broker-id> | histogram  | Distribution of the request latency in ms for a given broker  |
	| response-rate                                | meter      | Responses/second received from all brokers                    |
	| response-rate-for-broker-<broker-id>         | meter      | Responses/second received from a given broker                 |
	| response-size                                | histogram  | Distribution of the response size in bytes for all brokers    |
	| response-size-for-broker-<broker-id>         | histogram  | Distribution of the response size in bytes for a given broker |
	+----------------------------------------------+------------+---------------------------------------------------------------+

Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics.

Producer related metrics:

	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
	| Name                                      | Type       | Description                                                                          |
	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
	| batch-size                                | histogram  | Distribution of the number of bytes sent per partition per request for all topics    |
	| batch-size-for-topic-<topic>              | histogram  | Distribution of the number of bytes sent per partition per request for a given topic |
	| record-send-rate                          | meter      | Records/second sent to all topics                                                    |
	| record-send-rate-for-topic-<topic>        | meter      | Records/second sent to a given topic                                                 |
	| records-per-request                       | histogram  | Distribution of the number of records sent per request for all topics                |
	| records-per-request-for-topic-<topic>     | histogram  | Distribution of the number of records sent per request for a given topic             |
	| compression-ratio                         | histogram  | Distribution of the compression ratio times 100 of record batches for all topics     |
	| compression-ratio-for-topic-<topic>       | histogram  | Distribution of the compression ratio times 100 of record batches for a given topic  |
	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+

*/
package sarama

import (
	"io/ioutil"
	"log"
)

// Logger is the instance of a StdLogger interface that Sarama writes connection
// management events to. By default it is set to discard all log messages via ioutil.Discard,
// but you can set it to redirect wherever you want.
var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)

// StdLogger is used to log error messages.
type StdLogger interface {
	Print(v ...interface{})
	Printf(format string, v ...interface{})
	Println(v ...interface{})
}

// PanicHandler is called for recovering from panics spawned internally to the library (and thus
// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
var PanicHandler func(interface{})

// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
// to process.
var MaxRequestSize int32 = 100 * 1024 * 1024

// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
// protect the client from running out of memory. Please note that brokers do not have any natural limit on
// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
// (see https://issues.apache.org/jira/browse/KAFKA-2063).
var MaxResponseSize int32 = 100 * 1024 * 1024
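For orientation, a minimal SyncProducer round trip using the APIs vendored here (the broker address and topic name are placeholders):

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true // required by the SyncProducer

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "events",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored at partition %d, offset %d", partition, offset)
}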
vendor/github.com/Shopify/sarama/sasl_handshake_request.go (generated, vendored, new file, 33 lines)
@@ -0,0 +1,33 @@
package sarama

type SaslHandshakeRequest struct {
	Mechanism string
}

func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
	if err := pe.putString(r.Mechanism); err != nil {
		return err
	}

	return nil
}

func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
	if r.Mechanism, err = pd.getString(); err != nil {
		return err
	}

	return nil
}

func (r *SaslHandshakeRequest) key() int16 {
	return 17
}

func (r *SaslHandshakeRequest) version() int16 {
	return 0
}

func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
	return V0_10_0_0
}
vendor/github.com/Shopify/sarama/sasl_handshake_response.go (generated, vendored, new file, 38 lines)
@@ -0,0 +1,38 @@
package sarama

type SaslHandshakeResponse struct {
	Err               KError
	EnabledMechanisms []string
}

func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
	pe.putInt16(int16(r.Err))
	return pe.putStringArray(r.EnabledMechanisms)
}

func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
	kerr, err := pd.getInt16()
	if err != nil {
		return err
	}
	r.Err = KError(kerr)

	if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
		return err
	}

	return nil
}

func (r *SaslHandshakeResponse) key() int16 {
	return 17
}

func (r *SaslHandshakeResponse) version() int16 {
	return 0
}

func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
	return V0_10_0_0
}
vendor/github.com/Shopify/sarama/sync_group_request.go (generated, vendored, new file, 100 lines)
@@ -0,0 +1,100 @@
package sarama

type SyncGroupRequest struct {
	GroupId          string
	GenerationId     int32
	MemberId         string
	GroupAssignments map[string][]byte
}

func (r *SyncGroupRequest) encode(pe packetEncoder) error {
	if err := pe.putString(r.GroupId); err != nil {
		return err
	}

	pe.putInt32(r.GenerationId)

	if err := pe.putString(r.MemberId); err != nil {
		return err
	}

	if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil {
		return err
	}
	for memberId, memberAssignment := range r.GroupAssignments {
		if err := pe.putString(memberId); err != nil {
			return err
		}
		if err := pe.putBytes(memberAssignment); err != nil {
			return err
		}
	}

	return nil
}

func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
	if r.GroupId, err = pd.getString(); err != nil {
		return
	}
	if r.GenerationId, err = pd.getInt32(); err != nil {
		return
	}
	if r.MemberId, err = pd.getString(); err != nil {
		return
	}

	n, err := pd.getArrayLength()
	if err != nil {
		return err
	}
	if n == 0 {
		return nil
	}

	r.GroupAssignments = make(map[string][]byte)
	for i := 0; i < n; i++ {
		memberId, err := pd.getString()
		if err != nil {
			return err
		}
		memberAssignment, err := pd.getBytes()
		if err != nil {
			return err
		}

		r.GroupAssignments[memberId] = memberAssignment
	}

	return nil
}

func (r *SyncGroupRequest) key() int16 {
	return 14
}

func (r *SyncGroupRequest) version() int16 {
	return 0
}

func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
	return V0_9_0_0
}

func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
	if r.GroupAssignments == nil {
		r.GroupAssignments = make(map[string][]byte)
	}

	r.GroupAssignments[memberId] = memberAssignment
}

func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error {
	bin, err := encode(memberAssignment, nil)
	if err != nil {
		return err
	}

	r.AddGroupAssignment(memberId, bin)
	return nil
}
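AddGroupAssignment and AddGroupAssignmentMember are convenience setters for the GroupAssignments map; a group leader would populate one entry per member before sending. A sketch (group, member IDs, and assignment bytes are illustrative):

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// A group leader assembling raw assignments for two members.
	req := &sarama.SyncGroupRequest{
		GroupId:      "my-group",
		GenerationId: 1,
		MemberId:     "leader-member-id",
	}
	req.AddGroupAssignment("member-a", []byte("assignment a"))
	req.AddGroupAssignment("member-b", []byte("assignment b"))
	fmt.Println(len(req.GroupAssignments), "assignments queued") // 2 assignments queued
}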
vendor/github.com/Shopify/sarama/sync_group_response.go (generated, vendored, new file, 40 lines)
@@ -0,0 +1,40 @@
package sarama

type SyncGroupResponse struct {
	Err              KError
	MemberAssignment []byte
}

func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
	assignment := new(ConsumerGroupMemberAssignment)
	err := decode(r.MemberAssignment, assignment)
	return assignment, err
}

func (r *SyncGroupResponse) encode(pe packetEncoder) error {
	pe.putInt16(int16(r.Err))
	return pe.putBytes(r.MemberAssignment)
}

func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
	kerr, err := pd.getInt16()
	if err != nil {
		return err
	}
	r.Err = KError(kerr)

	r.MemberAssignment, err = pd.getBytes()
	return
}

func (r *SyncGroupResponse) key() int16 {
	return 14
}

func (r *SyncGroupResponse) version() int16 {
	return 0
}

func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
	return V0_9_0_0
}
vendor/github.com/Shopify/sarama/sync_producer.go (generated, vendored, new file, 164 lines)
@@ -0,0 +1,164 @@
package sarama

import "sync"

// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
// to avoid leaks: it may not be garbage-collected automatically when it passes out of scope.
//
// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
//
// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
// be set to true in its configuration.
type SyncProducer interface {

	// SendMessage produces a given message, and returns only when it either has
	// succeeded or failed to produce. It will return the partition and the offset
	// of the produced message, or an error if the message failed to produce.
	SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)

	// SendMessages produces a given set of messages, and returns only when all
	// messages in the set have either succeeded or failed. Note that messages
	// can succeed and fail individually; if some succeed and some fail,
	// SendMessages will return an error.
	SendMessages(msgs []*ProducerMessage) error

	// Close shuts down the producer and flushes any messages it may have buffered.
	// You must call this function before a producer object passes out of scope, as
	// it may otherwise leak memory. You must call this before calling Close on the
	// underlying client.
	Close() error
}

type syncProducer struct {
	producer *asyncProducer
	wg       sync.WaitGroup
}

// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
	if config == nil {
		config = NewConfig()
		config.Producer.Return.Successes = true
	}

	if err := verifyProducerConfig(config); err != nil {
		return nil, err
	}

	p, err := NewAsyncProducer(addrs, config)
	if err != nil {
		return nil, err
	}
	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
}

// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this producer.
func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
	if err := verifyProducerConfig(client.Config()); err != nil {
		return nil, err
	}

	p, err := NewAsyncProducerFromClient(client)
	if err != nil {
		return nil, err
	}
	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
}

func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
	sp := &syncProducer{producer: p}

	sp.wg.Add(2)
	go withRecover(sp.handleSuccesses)
	go withRecover(sp.handleErrors)

	return sp
}

func verifyProducerConfig(config *Config) error {
	if !config.Producer.Return.Errors {
		return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
	}
	if !config.Producer.Return.Successes {
		return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
	}
	return nil
}

func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
	oldMetadata := msg.Metadata
	defer func() {
		msg.Metadata = oldMetadata
	}()

	expectation := make(chan *ProducerError, 1)
	msg.Metadata = expectation
	sp.producer.Input() <- msg

	if err := <-expectation; err != nil {
		return -1, -1, err.Err
	}

	return msg.Partition, msg.Offset, nil
}

func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
	savedMetadata := make([]interface{}, len(msgs))
	for i := range msgs {
		savedMetadata[i] = msgs[i].Metadata
	}
	defer func() {
		for i := range msgs {
			msgs[i].Metadata = savedMetadata[i]
		}
	}()

	expectations := make(chan chan *ProducerError, len(msgs))
	go func() {
		for _, msg := range msgs {
			expectation := make(chan *ProducerError, 1)
			msg.Metadata = expectation
			sp.producer.Input() <- msg
			expectations <- expectation
		}
		close(expectations)
	}()

	var errors ProducerErrors
	for expectation := range expectations {
		if err := <-expectation; err != nil {
			errors = append(errors, err)
		}
	}

	if len(errors) > 0 {
		return errors
	}
	return nil
}

func (sp *syncProducer) handleSuccesses() {
	defer sp.wg.Done()
	for msg := range sp.producer.Successes() {
		expectation := msg.Metadata.(chan *ProducerError)
		expectation <- nil
	}
}

func (sp *syncProducer) handleErrors() {
	defer sp.wg.Done()
	for err := range sp.producer.Errors() {
		expectation := err.Msg.Metadata.(chan *ProducerError)
		expectation <- err
	}
}

func (sp *syncProducer) Close() error {
	sp.producer.AsyncClose()
	sp.wg.Wait()
	return nil
}
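SendMessages reports partial failure as a ProducerErrors value, so callers can see which individual messages failed. A hedged sketch of that handling (producer construction as in the earlier SyncProducer example; the main function is a stub):

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

// reportBatch sends msgs and logs which ones failed; partial failure from
// SendMessages arrives as a sarama.ProducerErrors value.
func reportBatch(producer sarama.SyncProducer, msgs []*sarama.ProducerMessage) {
	err := producer.SendMessages(msgs)
	if err == nil {
		return
	}
	if perrs, ok := err.(sarama.ProducerErrors); ok {
		for _, perr := range perrs {
			log.Printf("message to %q failed: %v", perr.Msg.Topic, perr.Err)
		}
		return
	}
	log.Print(err)
}

func main() {
	// Build a producer as in the earlier example, then call reportBatch.
}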
vendor/github.com/Shopify/sarama/utils.go (generated, vendored, new file, 152 lines)
@@ -0,0 +1,152 @@
package sarama

import (
	"bufio"
	"net"
	"sort"
)

type none struct{}

// make []int32 sortable so we can sort partition numbers
type int32Slice []int32

func (slice int32Slice) Len() int {
	return len(slice)
}

func (slice int32Slice) Less(i, j int) bool {
	return slice[i] < slice[j]
}

func (slice int32Slice) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}

func dupeAndSort(input []int32) []int32 {
	ret := make([]int32, 0, len(input))
	ret = append(ret, input...)

	sort.Sort(int32Slice(ret))
	return ret
}

func withRecover(fn func()) {
	defer func() {
		handler := PanicHandler
		if handler != nil {
			if err := recover(); err != nil {
				handler(err)
			}
		}
	}()

	fn()
}

func safeAsyncClose(b *Broker) {
	tmp := b // local var prevents clobbering in goroutine
	go withRecover(func() {
		if connected, _ := tmp.Connected(); connected {
			if err := tmp.Close(); err != nil {
				Logger.Println("Error closing broker", tmp.ID(), ":", err)
			}
		}
	})
}

// Encoder is a simple interface for any type that can be encoded as an array of bytes
// in order to be sent as the key or value of a Kafka message. Length() is provided as an
// optimization, and must return the same as len() on the result of Encode().
type Encoder interface {
	Encode() ([]byte, error)
	Length() int
}

// make strings and byte slices encodable for convenience so they can be used as keys
// and/or values in kafka messages

// StringEncoder implements the Encoder interface for Go strings so that they can be used
// as the Key or Value in a ProducerMessage.
type StringEncoder string

func (s StringEncoder) Encode() ([]byte, error) {
	return []byte(s), nil
}

func (s StringEncoder) Length() int {
	return len(s)
}

// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
// as the Key or Value in a ProducerMessage.
type ByteEncoder []byte

func (b ByteEncoder) Encode() ([]byte, error) {
	return b, nil
}

func (b ByteEncoder) Length() int {
	return len(b)
}

// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
// reads that trigger syscalls.
type bufConn struct {
	net.Conn
	buf *bufio.Reader
}

func newBufConn(conn net.Conn) *bufConn {
	return &bufConn{
		Conn: conn,
		buf:  bufio.NewReader(conn),
	}
}

func (bc *bufConn) Read(b []byte) (n int, err error) {
	return bc.buf.Read(b)
}

// KafkaVersion instances represent versions of the upstream Kafka broker.
type KafkaVersion struct {
	// it's a struct rather than just typing the array directly to make it opaque and stop people
	// generating their own arbitrary versions
	version [4]uint
}

func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
	return KafkaVersion{
		version: [4]uint{major, minor, veryMinor, patch},
	}
}

// IsAtLeast returns true if and only if the version it is called on is
// greater than or equal to the version passed in:
//    V1.IsAtLeast(V2) // false
//    V2.IsAtLeast(V1) // true
func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
	for i := range v.version {
		if v.version[i] > other.version[i] {
			return true
		} else if v.version[i] < other.version[i] {
			return false
		}
	}
	return true
}

// Effective constants defining the supported kafka versions.
var (
	V0_8_2_0   = newKafkaVersion(0, 8, 2, 0)
	V0_8_2_1   = newKafkaVersion(0, 8, 2, 1)
	V0_8_2_2   = newKafkaVersion(0, 8, 2, 2)
	V0_9_0_0   = newKafkaVersion(0, 9, 0, 0)
	V0_9_0_1   = newKafkaVersion(0, 9, 0, 1)
	V0_10_0_0  = newKafkaVersion(0, 10, 0, 0)
	V0_10_0_1  = newKafkaVersion(0, 10, 0, 1)
	V0_10_1_0  = newKafkaVersion(0, 10, 1, 0)
	minVersion = V0_8_2_0
)
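A quick illustration of the two exported pieces of this file, version gating via IsAtLeast and the Encoder helpers used for message keys and values:

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// Lexicographic comparison over the four version components.
	fmt.Println(sarama.V0_10_0_0.IsAtLeast(sarama.V0_9_0_0)) // true
	fmt.Println(sarama.V0_8_2_0.IsAtLeast(sarama.V0_10_1_0)) // false

	// StringEncoder and ByteEncoder satisfy Encoder for keys and values.
	var e sarama.Encoder = sarama.StringEncoder("payload")
	b, _ := e.Encode()
	fmt.Println(e.Length(), len(b)) // 7 7
}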
vendor/github.com/beorn7/perks/LICENSE (generated, vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
Copyright (C) 2013 Blake Mizerany

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
vendor/github.com/beorn7/perks/quantile/exampledata.txt (generated, vendored, new file, 2388 lines)
File diff suppressed because it is too large
vendor/github.com/beorn7/perks/quantile/stream.go (generated, vendored, new file, 292 lines)
@@ -0,0 +1,292 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile

import (
	"math"
	"sort"
)

// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
	Value float64 `json:",string"`
	Width float64 `json:",string"`
	Delta float64 `json:",string"`
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int           { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

type invariant func(s *stream, r float64) float64

// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * r
	}
	return newStream(ƒ)
}

// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * (s.n - r)
	}
	return newStream(ƒ)
}

// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targets map[float64]float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		var m = math.MaxFloat64
		var f float64
		for quantile, epsilon := range targets {
			if quantile*s.n <= r {
				f = (2 * epsilon * r) / quantile
			} else {
				f = (2 * epsilon * (s.n - r)) / (1 - quantile)
			}
			if f < m {
				m = f
			}
		}
		return m
	}
	return newStream(ƒ)
}

// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream
	b      Samples
	sorted bool
}

func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	s.insert(Sample{Value: v, Width: 1})
}

func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}

// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
	if !s.flushed() {
		// Fast path when there hasn't been enough data for a flush;
		// this also yields better accuracy for small sets of data.
		l := len(s.b)
		if l == 0 {
			return 0
		}
		i := int(math.Ceil(float64(l) * q))
		if i > 0 {
			i -= 1
		}
		s.maybeSort()
		return s.b[i].Value
	}
	s.flush()
	return s.stream.query(q)
}

// Merge merges samples into the underlying stream's samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	sort.Sort(samples)
	s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		return s.b
	}
	s.flush()
	return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	return len(s.b) + s.stream.count()
}

func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}

type stream struct {
	n float64
	l []Sample
	ƒ invariant
}

func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}

func (s *stream) merge(samples Samples) {
	// TODO(beorn7): This tries to merge not only individual samples, but
	// whole summaries. The paper doesn't mention merging summaries at
	// all. Unittests show that the merging is inaccurate. Find out how to
	// do merges properly.
	var r float64
	i := 0
	for _, sample := range samples {
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
		i++
	inserted:
		s.n += sample.Width
		r += sample.Width
	}
	s.compress()
}

func (s *stream) count() int {
	return int(s.n)
}

func (s *stream) query(q float64) float64 {
	t := math.Ceil(q * s.n)
	t += math.Ceil(s.ƒ(s, t) / 2)
	p := s.l[0]
	var r float64
	for _, c := range s.l[1:] {
		r += p.Width
		if r+c.Width+c.Delta > t {
			return p.Value
		}
		p = c
	}
	return p.Value
}

func (s *stream) compress() {
	if len(s.l) < 2 {
		return
	}
	x := s.l[len(s.l)-1]
	xi := len(s.l) - 1
	r := s.n - 1 - x.Width

	for i := len(s.l) - 2; i >= 0; i-- {
		c := s.l[i]
		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
			x.Width += c.Width
			s.l[xi] = x
			// Remove element at i.
			copy(s.l[i:], s.l[i+1:])
			s.l = s.l[:len(s.l)-1]
			xi -= 1
		} else {
			x = c
			xi = i
		}
		r -= c.Width
	}
}

func (s *stream) samples() Samples {
	samples := make(Samples, len(s.l))
	copy(samples, s.l)
	return samples
}
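This quantile package is pulled in via the Prometheus client for summary metrics. A minimal usage sketch with NewTargeted, tracking the median and 99th percentile of a synthetic sequence:

package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track q=0.50 and q=0.99 with the given absolute error bounds.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.99: 0.001,
	})
	for i := 1; i <= 1000; i++ {
		q.Insert(float64(i))
	}
	fmt.Println(q.Query(0.50), q.Query(0.99)) // ≈500 and ≈990
}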
vendor/github.com/davecgh/go-spew/LICENSE (generated, vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
ISC License

Copyright (c) 2012-2016 Dave Collins <dave@davec.name>

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
vendor/github.com/davecgh/go-spew/spew/bypass.go (generated, vendored, new file, 152 lines)
@@ -0,0 +1,152 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build !js,!appengine,!safe,!disableunsafe

package spew

import (
	"reflect"
	"unsafe"
)

const (
	// UnsafeDisabled is a build-time constant which specifies whether or
	// not access to the unsafe package is available.
	UnsafeDisabled = false

	// ptrSize is the size of a pointer on the current arch.
	ptrSize = unsafe.Sizeof((*byte)(nil))
)

var (
	// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
	// internal reflect.Value fields. These values are valid before golang
	// commit ecccf07e7f9d which changed the format. They are also valid
	// after commit 82f48826c6c7 which changed the format again to mirror
	// the original format. Code in the init function updates these offsets
	// as necessary.
	offsetPtr    = uintptr(ptrSize)
	offsetScalar = uintptr(0)
	offsetFlag   = uintptr(ptrSize * 2)

	// flagKindWidth and flagKindShift indicate various bits that the
	// reflect package uses internally to track kind information.
	//
	// flagRO indicates whether or not the value field of a reflect.Value is
	// read-only.
	//
	// flagIndir indicates whether the value field of a reflect.Value is
	// the actual data or a pointer to the data.
	//
	// These values are valid before golang commit 90a7c3c86944 which
	// changed their positions. Code in the init function updates these
	// flags as necessary.
	flagKindWidth = uintptr(5)
	flagKindShift = uintptr(flagKindWidth - 1)
	flagRO        = uintptr(1 << 0)
	flagIndir     = uintptr(1 << 1)
)

func init() {
	// Older versions of reflect.Value stored small integers directly in the
	// ptr field (which is named val in the older versions). Versions
	// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
	// scalar for this purpose which unfortunately came before the flag
	// field, so the offset of the flag field is different for those
	// versions.
	//
	// This code constructs a new reflect.Value from a known small integer
	// and checks if the size of the reflect.Value struct indicates it has
	// the scalar field. When it does, the offsets are updated accordingly.
	vv := reflect.ValueOf(0xf00)
	if unsafe.Sizeof(vv) == (ptrSize * 4) {
		offsetScalar = ptrSize * 2
		offsetFlag = ptrSize * 3
	}

	// Commit 90a7c3c86944 changed the flag positions such that the low
	// order bits are the kind. This code extracts the kind from the flags
	// field and ensures it's the correct type. When it's not, the flag
	// order has been changed to the newer format, so the flags are updated
	// accordingly.
	upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
	upfv := *(*uintptr)(upf)
	flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
	if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
		flagKindShift = 0
		flagRO = 1 << 5
		flagIndir = 1 << 6

		// Commit adf9b30e5594 modified the flags to separate the
		// flagRO flag into two bits which specifies whether or not the
		// field is embedded. This causes flagIndir to move over a bit
		// and means that flagRO is the combination of either of the
		// original flagRO bit and the new bit.
		//
		// This code detects the change by extracting what used to be
		// the indirect bit to ensure it's set. When it's not, the flag
		// order has been changed to the newer format, so the flags are
		// updated accordingly.
		if upfv&flagIndir == 0 {
			flagRO = 3 << 5
			flagIndir = 1 << 7
		}
	}
}

// unsafeReflectValue converts the passed reflect.Value into one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
	indirects := 1
	vt := v.Type()
	upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
	rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
	if rvf&flagIndir != 0 {
		vt = reflect.PtrTo(v.Type())
		indirects++
	} else if offsetScalar != 0 {
		// The value is in the scalar field when it's not one of the
		// reference types.
		switch vt.Kind() {
		case reflect.Uintptr:
		case reflect.Chan:
		case reflect.Func:
		case reflect.Map:
		case reflect.Ptr:
		case reflect.UnsafePointer:
		default:
			upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
				offsetScalar)
		}
	}

	pv := reflect.NewAt(vt, upv)
	rv = pv
	for i := 0; i < indirects; i++ {
		rv = rv.Elem()
	}
	return rv
}
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go (generated, vendored, new file, 38 lines)
@@ -0,0 +1,38 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe

package spew

import "reflect"

const (
	// UnsafeDisabled is a build-time constant which specifies whether or
	// not access to the unsafe package is available.
	UnsafeDisabled = true
)

// unsafeReflectValue typically converts the passed reflect.Value into one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data. However, doing this relies on access to
// the unsafe package. This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
	return v
}
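The restriction these two files work around is easy to demonstrate: reflect refuses Interface() on unexported struct fields, so without unsafe, spew cannot call a field's String method. A standalone sketch of that visibility rule (types are hypothetical):

package main

import (
	"fmt"
	"reflect"
)

type id int

func (i id) String() string { return fmt.Sprintf("id#%d", i) }

type record struct {
	key id // unexported field: reflect blocks Interface() on it
}

func main() {
	v := reflect.ValueOf(record{key: 7}).Field(0)
	fmt.Println(v.CanInterface()) // false: visibility rules block access
	// spew's unsafeReflectValue exists to bypass exactly this restriction
	// (when built without the "safe" tag) so it can still call String().
}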
vendor/github.com/davecgh/go-spew/spew/common.go (generated, vendored, new file, 341 lines)
@@ -0,0 +1,341 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
	"bytes"
	"fmt"
	"io"
	"reflect"
	"sort"
	"strconv"
)

// Some constants in the form of bytes to avoid string overhead. This mirrors
// the technique used in the fmt package.
var (
	panicBytes            = []byte("(PANIC=")
	plusBytes             = []byte("+")
	iBytes                = []byte("i")
	trueBytes             = []byte("true")
	falseBytes            = []byte("false")
	interfaceBytes        = []byte("(interface {})")
	commaNewlineBytes     = []byte(",\n")
	newlineBytes          = []byte("\n")
	openBraceBytes        = []byte("{")
	openBraceNewlineBytes = []byte("{\n")
	closeBraceBytes       = []byte("}")
	asteriskBytes         = []byte("*")
	colonBytes            = []byte(":")
	colonSpaceBytes       = []byte(": ")
	openParenBytes        = []byte("(")
	closeParenBytes       = []byte(")")
	spaceBytes            = []byte(" ")
	pointerChainBytes     = []byte("->")
	nilAngleBytes         = []byte("<nil>")
	maxNewlineBytes       = []byte("<max depth reached>\n")
	maxShortBytes         = []byte("<max>")
	circularBytes         = []byte("<already shown>")
	circularShortBytes    = []byte("<shown>")
	invalidAngleBytes     = []byte("<invalid>")
	openBracketBytes      = []byte("[")
	closeBracketBytes     = []byte("]")
	percentBytes          = []byte("%")
	precisionBytes        = []byte(".")
	openAngleBytes        = []byte("<")
	closeAngleBytes       = []byte(">")
	openMapBytes          = []byte("map[")
	closeMapBytes         = []byte("]")
	lenEqualsBytes        = []byte("len=")
	capEqualsBytes        = []byte("cap=")
)

// hexDigits is used to map a decimal value to a hex digit.
var hexDigits = "0123456789abcdef"

// catchPanic handles any panics that might occur during the handleMethods
// calls.
func catchPanic(w io.Writer, v reflect.Value) {
	if err := recover(); err != nil {
		w.Write(panicBytes)
		fmt.Fprintf(w, "%v", err)
		w.Write(closeParenBytes)
	}
}

// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputs the result to Writer w.
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value.
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
	// We need an interface to check if the type implements the error or
	// Stringer interface. However, the reflect package won't give us an
	// interface on certain things like unexported struct fields in order
	// to enforce visibility rules. We use unsafe, when it's available,
	// to bypass these restrictions since this package does not mutate the
	// values.
	if !v.CanInterface() {
		if UnsafeDisabled {
			return false
		}

		v = unsafeReflectValue(v)
	}

	// Choose whether or not to do error and Stringer interface lookups against
	// the base type or a pointer to the base type depending on settings.
	// Technically calling one of these methods with a pointer receiver can
	// mutate the value, however, types which choose to satisfy an error or
	// Stringer interface with a pointer receiver should not be mutating their
	// state inside these interface methods.
	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
		v = unsafeReflectValue(v)
	}
	if v.CanAddr() {
		v = v.Addr()
	}

	// Is it an error or Stringer?
	switch iface := v.Interface().(type) {
	case error:
		defer catchPanic(w, v)
		if cs.ContinueOnMethod {
			w.Write(openParenBytes)
			w.Write([]byte(iface.Error()))
			w.Write(closeParenBytes)
			w.Write(spaceBytes)
			return false
		}

		w.Write([]byte(iface.Error()))
		return true

	case fmt.Stringer:
		defer catchPanic(w, v)
		if cs.ContinueOnMethod {
			w.Write(openParenBytes)
			w.Write([]byte(iface.String()))
			w.Write(closeParenBytes)
			w.Write(spaceBytes)
			return false
		}
		w.Write([]byte(iface.String()))
		return true
	}
	return false
}

// printBool outputs a boolean value as true or false to Writer w.
func printBool(w io.Writer, val bool) {
	if val {
		w.Write(trueBytes)
	} else {
		w.Write(falseBytes)
	}
}

// printInt outputs a signed integer value to Writer w.
func printInt(w io.Writer, val int64, base int) {
	w.Write([]byte(strconv.FormatInt(val, base)))
}

// printUint outputs an unsigned integer value to Writer w.
func printUint(w io.Writer, val uint64, base int) {
	w.Write([]byte(strconv.FormatUint(val, base)))
}

// printFloat outputs a floating point value using the specified precision,
// which is expected to be 32 or 64bit, to Writer w.
func printFloat(w io.Writer, val float64, precision int) {
	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
}

// printComplex outputs a complex value using the specified float precision
// for the real and imaginary parts to Writer w.
func printComplex(w io.Writer, c complex128, floatPrecision int) {
	r := real(c)
	w.Write(openParenBytes)
	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
	i := imag(c)
	if i >= 0 {
		w.Write(plusBytes)
	}
	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
	w.Write(iBytes)
	w.Write(closeParenBytes)
}

// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
	// Null pointer.
	num := uint64(p)
	if num == 0 {
		w.Write(nilAngleBytes)
		return
	}

	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
	buf := make([]byte, 18)

	// It's simpler to construct the hex string right to left.
	base := uint64(16)
	i := len(buf) - 1
	for num >= base {
		buf[i] = hexDigits[num%base]
|
||||
num /= base
|
||||
i--
|
||||
}
|
||||
buf[i] = hexDigits[num]
|
||||
|
||||
// Add '0x' prefix.
|
||||
i--
|
||||
buf[i] = 'x'
|
||||
i--
|
||||
buf[i] = '0'
|
||||
|
||||
// Strip unused leading bytes.
|
||||
buf = buf[i:]
|
||||
w.Write(buf)
|
||||
}
|
||||
|
||||
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
|
||||
// elements to be sorted.
|
||||
type valuesSorter struct {
|
||||
values []reflect.Value
|
||||
strings []string // either nil or same len and values
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// newValuesSorter initializes a valuesSorter instance, which holds a set of
|
||||
// surrogate keys on which the data should be sorted. It uses flags in
|
||||
// ConfigState to decide if and how to populate those surrogate keys.
|
||||
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
|
||||
vs := &valuesSorter{values: values, cs: cs}
|
||||
if canSortSimply(vs.values[0].Kind()) {
|
||||
return vs
|
||||
}
|
||||
if !cs.DisableMethods {
|
||||
vs.strings = make([]string, len(values))
|
||||
for i := range vs.values {
|
||||
b := bytes.Buffer{}
|
||||
if !handleMethods(cs, &b, vs.values[i]) {
|
||||
vs.strings = nil
|
||||
break
|
||||
}
|
||||
vs.strings[i] = b.String()
|
||||
}
|
||||
}
|
||||
if vs.strings == nil && cs.SpewKeys {
|
||||
vs.strings = make([]string, len(values))
|
||||
for i := range vs.values {
|
||||
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
|
||||
}
|
||||
}
|
||||
return vs
|
||||
}
|
||||
|
||||
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
|
||||
// directly, or whether it should be considered for sorting by surrogate keys
|
||||
// (if the ConfigState allows it).
|
||||
func canSortSimply(kind reflect.Kind) bool {
|
||||
// This switch parallels valueSortLess, except for the default case.
|
||||
switch kind {
|
||||
case reflect.Bool:
|
||||
return true
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return true
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return true
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return true
|
||||
case reflect.String:
|
||||
return true
|
||||
case reflect.Uintptr:
|
||||
return true
|
||||
case reflect.Array:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Len returns the number of values in the slice. It is part of the
|
||||
// sort.Interface implementation.
|
||||
func (s *valuesSorter) Len() int {
|
||||
return len(s.values)
|
||||
}
|
||||
|
||||
// Swap swaps the values at the passed indices. It is part of the
|
||||
// sort.Interface implementation.
|
||||
func (s *valuesSorter) Swap(i, j int) {
|
||||
s.values[i], s.values[j] = s.values[j], s.values[i]
|
||||
if s.strings != nil {
|
||||
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
|
||||
}
|
||||
}
|
||||
|
||||
// valueSortLess returns whether the first value should sort before the second
|
||||
// value. It is used by valueSorter.Less as part of the sort.Interface
|
||||
// implementation.
|
||||
func valueSortLess(a, b reflect.Value) bool {
|
||||
switch a.Kind() {
|
||||
case reflect.Bool:
|
||||
return !a.Bool() && b.Bool()
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return a.Int() < b.Int()
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return a.Float() < b.Float()
|
||||
case reflect.String:
|
||||
return a.String() < b.String()
|
||||
case reflect.Uintptr:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Array:
|
||||
// Compare the contents of both arrays.
|
||||
l := a.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
av := a.Index(i)
|
||||
bv := b.Index(i)
|
||||
if av.Interface() == bv.Interface() {
|
||||
continue
|
||||
}
|
||||
return valueSortLess(av, bv)
|
||||
}
|
||||
}
|
||||
return a.String() < b.String()
|
||||
}
|
||||
|
||||
// Less returns whether the value at index i should sort before the
|
||||
// value at index j. It is part of the sort.Interface implementation.
|
||||
func (s *valuesSorter) Less(i, j int) bool {
|
||||
if s.strings == nil {
|
||||
return valueSortLess(s.values[i], s.values[j])
|
||||
}
|
||||
return s.strings[i] < s.strings[j]
|
||||
}
|
||||
|
||||
// sortValues is a sort function that handles both native types and any type that
|
||||
// can be converted to error or Stringer. Other inputs are sorted according to
|
||||
// their Value.String() value to ensure display stability.
|
||||
func sortValues(values []reflect.Value, cs *ConfigState) {
|
||||
if len(values) == 0 {
|
||||
return
|
||||
}
|
||||
sort.Sort(newValuesSorter(values, cs))
|
||||
}
|
||||
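The surrogate-key sorting implemented by newValuesSorter and sortValues above is easiest to see from the caller's side. A minimal sketch of how it surfaces through the public API (assuming the vendored package is imported by its upstream path, and with arbitrary example values):

package main

import (
	"github.com/davecgh/go-spew/spew"
)

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}

	// String keys satisfy canSortSimply, so valueSortLess orders them
	// directly and repeated dumps produce identical, diffable output.
	cfg := spew.ConfigState{Indent: " ", SortKeys: true}
	cfg.Dump(m)
}

With SortKeys left false, keys are emitted in whatever order the runtime's map iteration yields.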
306 vendor/github.com/davecgh/go-spew/spew/config.go generated vendored Normal file
@@ -0,0 +1,306 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// ConfigState houses the configuration options used by spew to format and
// display values. There is a global instance, Config, that is used to control
// all top-level Formatter and Dump functionality. Each ConfigState instance
// provides methods equivalent to the top-level functions.
//
// The zero value for ConfigState provides no indentation. You would typically
// want to set it to a space or a tab.
//
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
// with default settings. See the documentation of NewDefaultConfig for default
// values.
type ConfigState struct {
	// Indent specifies the string to use for each indentation level. The
	// global config instance that all top-level functions use sets this to a
	// single space by default. If you would like more indentation, you might
	// set this to a tab with "\t" or perhaps two spaces with "  ".
	Indent string

	// MaxDepth controls the maximum number of levels to descend into nested
	// data structures. The default, 0, means there is no limit.
	//
	// NOTE: Circular data structures are properly detected, so it is not
	// necessary to set this value unless you specifically want to limit deeply
	// nested data structures.
	MaxDepth int

	// DisableMethods specifies whether or not error and Stringer interfaces are
	// invoked for types that implement them.
	DisableMethods bool

	// DisablePointerMethods specifies whether or not to check for and invoke
	// error and Stringer interfaces on types which only accept a pointer
	// receiver when the current type is not a pointer.
	//
	// NOTE: This might be an unsafe action since calling one of these methods
	// with a pointer receiver could technically mutate the value, however,
	// in practice, types which choose to satisfy an error or Stringer
	// interface with a pointer receiver should not be mutating their state
	// inside these interface methods. As a result, this option relies on
	// access to the unsafe package, so it will not have any effect when
	// running in environments without access to the unsafe package such as
	// Google App Engine or with the "safe" build tag specified.
	DisablePointerMethods bool

	// DisablePointerAddresses specifies whether to disable the printing of
	// pointer addresses. This is useful when diffing data structures in tests.
	DisablePointerAddresses bool

	// DisableCapacities specifies whether to disable the printing of capacities
	// for arrays, slices, maps and channels. This is useful when diffing
	// data structures in tests.
	DisableCapacities bool

	// ContinueOnMethod specifies whether or not recursion should continue once
	// a custom error or Stringer interface is invoked. The default, false,
	// means it will print the results of invoking the custom error or Stringer
	// interface and return immediately instead of continuing to recurse into
	// the internals of the data type.
	//
	// NOTE: This flag does not have any effect if method invocation is disabled
	// via the DisableMethods or DisablePointerMethods options.
	ContinueOnMethod bool

	// SortKeys specifies map keys should be sorted before being printed. Use
	// this to have a more deterministic, diffable output. Note that only
	// native types (bool, int, uint, floats, uintptr and string) and types
	// that support the error or Stringer interfaces (if methods are
	// enabled) are supported, with other types sorted according to the
	// reflect.Value.String() output which guarantees display stability.
	SortKeys bool

	// SpewKeys specifies that, as a last resort attempt, map keys should
	// be spewed to strings and sorted by those strings. This is only
	// considered if SortKeys is true.
	SpewKeys bool
}

// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
var Config = ConfigState{Indent: " "}

// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the formatted string as a value that satisfies error. See NewFormatter
// for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
	return fmt.Errorf(format, c.convertArgs(a)...)
}

// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprint(w, c.convertArgs(a)...)
}

// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
	return fmt.Fprintf(w, format, c.convertArgs(a)...)
}

// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprintln(w, c.convertArgs(a)...)
}

// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
	return fmt.Print(c.convertArgs(a)...)
}

// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
	return fmt.Printf(format, c.convertArgs(a)...)
}

// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
	return fmt.Println(c.convertArgs(a)...)
}

// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprint(a ...interface{}) string {
	return fmt.Sprint(c.convertArgs(a)...)
}

// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
	return fmt.Sprintf(format, c.convertArgs(a)...)
}

// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintln(a ...interface{}) string {
	return fmt.Sprintln(c.convertArgs(a)...)
}

/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.

The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).

Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
c.Printf, c.Println, or c.Print.
*/
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
	return newFormatter(c, v)
}

// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
	fdump(c, w, a...)
}

/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:

	* Pointers are dereferenced and followed
	* Circular data structures are detected and handled properly
	* Custom Stringer/error interfaces are optionally invoked, including
	  on unexported types
	* Custom types which only implement the Stringer/error interfaces via
	  a pointer receiver are optionally invoked when passing non-pointer
	  variables
	* Byte arrays and slices are dumped like the hexdump -C command which
	  includes offsets, byte values in hex, and ASCII output

The configuration options are controlled by modifying the public members
of c. See ConfigState for options documentation.

See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func (c *ConfigState) Dump(a ...interface{}) {
	fdump(c, os.Stdout, a...)
}

// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func (c *ConfigState) Sdump(a ...interface{}) string {
	var buf bytes.Buffer
	fdump(c, &buf, a...)
	return buf.String()
}

// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a spew Formatter interface using
// the ConfigState associated with c.
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
	formatters = make([]interface{}, len(args))
	for index, arg := range args {
		formatters[index] = newFormatter(c, arg)
	}
	return formatters
}

// NewDefaultConfig returns a ConfigState with the following default settings.
//
// Indent: " "
// MaxDepth: 0
// DisableMethods: false
// DisablePointerMethods: false
// ContinueOnMethod: false
// SortKeys: false
func NewDefaultConfig() *ConfigState {
	return &ConfigState{Indent: " "}
}
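As a quick illustration of the ConfigState type defined above, here is a hedged sketch of creating an independent instance instead of mutating the global spew.Config; the field values and types are arbitrary examples, and the import assumes the vendored package's upstream path:

package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	// NewDefaultConfig returns a fresh *ConfigState, so tweaking it
	// does not affect other users of the global spew.Config.
	cfg := spew.NewDefaultConfig()
	cfg.MaxDepth = 2                   // stop descending past two levels
	cfg.DisablePointerAddresses = true // stable output for test diffs

	type inner struct{ N int }
	type outer struct{ In *inner }
	cfg.Fdump(os.Stderr, outer{In: &inner{N: 7}})
}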
211 vendor/github.com/davecgh/go-spew/spew/doc.go generated vendored Normal file
@@ -0,0 +1,211 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
Package spew implements a deep pretty printer for Go data structures to aid in
debugging.

A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types is as follows:

	* Pointers are dereferenced and followed
	* Circular data structures are detected and handled properly
	* Custom Stringer/error interfaces are optionally invoked, including
	  on unexported types
	* Custom types which only implement the Stringer/error interfaces via
	  a pointer receiver are optionally invoked when passing non-pointer
	  variables
	* Byte arrays and slices are dumped like the hexdump -C command which
	  includes offsets, byte values in hex, and ASCII output (only when using
	  Dump style)

There are two different approaches spew allows for dumping Go data structures:

	* Dump style which prints with newlines, customizable indentation,
	  and additional debug information such as types and all pointer addresses
	  used to indirect to the final value
	* A custom Formatter interface that integrates cleanly with the standard fmt
	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
	  similar to the default %v while providing the additional functionality
	  outlined above and passing unsupported format verbs such as %x and %q
	  along to fmt

Quick Start

This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.

To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
	spew.Dump(myVar1, myVar2, ...)
	spew.Fdump(someWriter, myVar1, myVar2, ...)
	str := spew.Sdump(myVar1, myVar2, ...)

Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):
	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)

Configuration Options

Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
via the spew.Config global.

It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.

The following configuration options are available:
	* Indent
		String to use for each indentation level for Dump functions.
		It is a single space by default. A popular alternative is "\t".

	* MaxDepth
		Maximum number of levels to descend into nested data structures.
		There is no limit by default.

	* DisableMethods
		Disables invocation of error and Stringer interface methods.
		Method invocation is enabled by default.

	* DisablePointerMethods
		Disables invocation of error and Stringer interface methods on types
		which only accept pointer receivers from non-pointer variables.
		Pointer method invocation is enabled by default.

	* DisablePointerAddresses
		DisablePointerAddresses specifies whether to disable the printing of
		pointer addresses. This is useful when diffing data structures in tests.

	* DisableCapacities
		DisableCapacities specifies whether to disable the printing of
		capacities for arrays, slices, maps and channels. This is useful when
		diffing data structures in tests.

	* ContinueOnMethod
		Enables recursion into types after invoking error and Stringer interface
		methods. Recursion after method invocation is disabled by default.

	* SortKeys
		Specifies map keys should be sorted before being printed. Use
		this to have a more deterministic, diffable output. Note that
		only native types (bool, int, uint, floats, uintptr and string)
		and types which implement error or Stringer interfaces are
		supported with other types sorted according to the
		reflect.Value.String() output which guarantees display
		stability. Natural map order is used by default.

	* SpewKeys
		Specifies that, as a last resort attempt, map keys should be
		spewed to strings and sorted by those strings. This is only
		considered if SortKeys is true.

Dump Usage

Simply call spew.Dump with a list of variables you want to dump:

	spew.Dump(myVar1, myVar2, ...)

You may also call spew.Fdump if you would prefer to output to an arbitrary
io.Writer. For example, to dump to standard error:

	spew.Fdump(os.Stderr, myVar1, myVar2, ...)

A third option is to call spew.Sdump to get the formatted output as a string:

	str := spew.Sdump(myVar1, myVar2, ...)

Sample Dump Output

See the Dump example for details on the setup of the types and variables being
shown here.

	(main.Foo) {
	 unexportedField: (*main.Bar)(0xf84002e210)({
	  flag: (main.Flag) flagTwo,
	  data: (uintptr) <nil>
	 }),
	 ExportedField: (map[interface {}]interface {}) (len=1) {
	  (string) (len=3) "one": (bool) true
	 }
	}

Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
	([]uint8) (len=32 cap=32) {
	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
	 00000020  31 32                                             |12|
	}

Custom Formatter

Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
formatter is useful for inline printing of smaller data types similar to the
standard %v format specifier.

The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).

Custom Formatter Usage

The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Print. The
functions have syntax you are most likely already familiar with:

	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
	spew.Println(myVar, myVar2)
	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)

See the Index for the full list of convenience functions.

Sample Formatter Output

Double pointer to a uint8:
	%v: <**>5
	%+v: <**>(0xf8400420d0->0xf8400420c8)5
	%#v: (**uint8)5
	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5

Pointer to circular struct with a uint8 field and a pointer to itself:
	%v: <*>{1 <*><shown>}
	%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
	%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}

See the Printf example for details on the setup of variables being shown
here.

Errors

Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information
inline with the output. Since spew is intended to provide deep pretty printing
capabilities on structures, it intentionally does not return any errors.
*/
package spew
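The verb combinations described in the package documentation above can be exercised with a short program; a minimal sketch (the pointer addresses in the comments are placeholders, and actual values vary per run):

package main

import "github.com/davecgh/go-spew/spew"

func main() {
	n := uint8(5)
	p := &n
	pp := &p

	// The four verb combinations handled by spew's custom Formatter.
	spew.Printf("%v\n", pp)   // <**>5
	spew.Printf("%+v\n", pp)  // <**>(0x...->0x...)5
	spew.Printf("%#v\n", pp)  // (**uint8)5
	spew.Printf("%#+v\n", pp) // (**uint8)(0x...->0x...)5
}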
509 vendor/github.com/davecgh/go-spew/spew/dump.go generated vendored Normal file
@@ -0,0 +1,509 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"reflect"
	"regexp"
	"strconv"
	"strings"
)

var (
	// uint8Type is a reflect.Type representing a uint8. It is used to
	// convert cgo types to uint8 slices for hexdumping.
	uint8Type = reflect.TypeOf(uint8(0))

	// cCharRE is a regular expression that matches a cgo char.
	// It is used to detect character arrays to hexdump them.
	cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")

	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
	// char. It is used to detect unsigned character arrays to hexdump
	// them.
	cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")

	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
	// It is used to detect uint8_t arrays to hexdump them.
	cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
)

// dumpState contains information about the state of a dump operation.
type dumpState struct {
	w                io.Writer
	depth            int
	pointers         map[uintptr]int
	ignoreNextType   bool
	ignoreNextIndent bool
	cs               *ConfigState
}

// indent performs indentation according to the depth level and cs.Indent
// option.
func (d *dumpState) indent() {
	if d.ignoreNextIndent {
		d.ignoreNextIndent = false
		return
	}
	d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
}

// unpackValue returns values inside of non-nil interfaces when possible.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
	if v.Kind() == reflect.Interface && !v.IsNil() {
		v = v.Elem()
	}
	return v
}

// dumpPtr handles formatting of pointers by indirecting them as necessary.
func (d *dumpState) dumpPtr(v reflect.Value) {
	// Remove pointers at or below the current depth from map used to detect
	// circular refs.
	for k, depth := range d.pointers {
		if depth >= d.depth {
			delete(d.pointers, k)
		}
	}

	// Keep list of all dereferenced pointers to show later.
	pointerChain := make([]uintptr, 0)

	// Figure out how many levels of indirection there are by dereferencing
	// pointers and unpacking interfaces down the chain while detecting circular
	// references.
	nilFound := false
	cycleFound := false
	indirects := 0
	ve := v
	for ve.Kind() == reflect.Ptr {
		if ve.IsNil() {
			nilFound = true
			break
		}
		indirects++
		addr := ve.Pointer()
		pointerChain = append(pointerChain, addr)
		if pd, ok := d.pointers[addr]; ok && pd < d.depth {
			cycleFound = true
			indirects--
			break
		}
		d.pointers[addr] = d.depth

		ve = ve.Elem()
		if ve.Kind() == reflect.Interface {
			if ve.IsNil() {
				nilFound = true
				break
			}
			ve = ve.Elem()
		}
	}

	// Display type information.
	d.w.Write(openParenBytes)
	d.w.Write(bytes.Repeat(asteriskBytes, indirects))
	d.w.Write([]byte(ve.Type().String()))
	d.w.Write(closeParenBytes)

	// Display pointer information.
	if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
		d.w.Write(openParenBytes)
		for i, addr := range pointerChain {
			if i > 0 {
				d.w.Write(pointerChainBytes)
			}
			printHexPtr(d.w, addr)
		}
		d.w.Write(closeParenBytes)
	}

	// Display dereferenced value.
	d.w.Write(openParenBytes)
	switch {
	case nilFound:
		d.w.Write(nilAngleBytes)

	case cycleFound:
		d.w.Write(circularBytes)

	default:
		d.ignoreNextType = true
		d.dump(ve)
	}
	d.w.Write(closeParenBytes)
}

// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
// reflection) arrays and slices are dumped in hexdump -C fashion.
func (d *dumpState) dumpSlice(v reflect.Value) {
	// Determine whether this type should be hex dumped or not. Also,
	// for types which should be hexdumped, try to use the underlying data
	// first, then fall back to trying to convert them to a uint8 slice.
	var buf []uint8
	doConvert := false
	doHexDump := false
	numEntries := v.Len()
	if numEntries > 0 {
		vt := v.Index(0).Type()
		vts := vt.String()
		switch {
		// C types that need to be converted.
		case cCharRE.MatchString(vts):
			fallthrough
		case cUnsignedCharRE.MatchString(vts):
			fallthrough
		case cUint8tCharRE.MatchString(vts):
			doConvert = true

		// Try to use existing uint8 slices and fall back to converting
		// and copying if that fails.
		case vt.Kind() == reflect.Uint8:
			// We need an addressable interface to convert the type
			// to a byte slice. However, the reflect package won't
			// give us an interface on certain things like
			// unexported struct fields in order to enforce
			// visibility rules. We use unsafe, when available, to
			// bypass these restrictions since this package does not
			// mutate the values.
			vs := v
			if !vs.CanInterface() || !vs.CanAddr() {
				vs = unsafeReflectValue(vs)
			}
			if !UnsafeDisabled {
				vs = vs.Slice(0, numEntries)

				// Use the existing uint8 slice if it can be
				// type asserted.
				iface := vs.Interface()
				if slice, ok := iface.([]uint8); ok {
					buf = slice
					doHexDump = true
					break
				}
			}

			// The underlying data needs to be converted if it can't
			// be type asserted to a uint8 slice.
			doConvert = true
		}

		// Copy and convert the underlying type if needed.
		if doConvert && vt.ConvertibleTo(uint8Type) {
			// Convert and copy each element into a uint8 byte
			// slice.
			buf = make([]uint8, numEntries)
			for i := 0; i < numEntries; i++ {
				vv := v.Index(i)
				buf[i] = uint8(vv.Convert(uint8Type).Uint())
			}
			doHexDump = true
		}
	}

	// Hexdump the entire slice as needed.
	if doHexDump {
		indent := strings.Repeat(d.cs.Indent, d.depth)
		str := indent + hex.Dump(buf)
		str = strings.Replace(str, "\n", "\n"+indent, -1)
		str = strings.TrimRight(str, d.cs.Indent)
		d.w.Write([]byte(str))
		return
	}

	// Recursively call dump for each item.
	for i := 0; i < numEntries; i++ {
		d.dump(d.unpackValue(v.Index(i)))
		if i < (numEntries - 1) {
			d.w.Write(commaNewlineBytes)
		} else {
			d.w.Write(newlineBytes)
		}
	}
}

// dump is the main workhorse for dumping a value. It uses the passed reflect
// value to figure out what kind of object we are dealing with and formats it
// appropriately. It is a recursive function, however circular data structures
// are detected and handled properly.
func (d *dumpState) dump(v reflect.Value) {
	// Handle invalid reflect values immediately.
	kind := v.Kind()
	if kind == reflect.Invalid {
		d.w.Write(invalidAngleBytes)
		return
	}

	// Handle pointers specially.
	if kind == reflect.Ptr {
		d.indent()
		d.dumpPtr(v)
		return
	}

	// Print type information unless already handled elsewhere.
	if !d.ignoreNextType {
		d.indent()
		d.w.Write(openParenBytes)
		d.w.Write([]byte(v.Type().String()))
		d.w.Write(closeParenBytes)
		d.w.Write(spaceBytes)
	}
	d.ignoreNextType = false

	// Display length and capacity if the built-in len and cap functions
	// work with the value's kind and the len/cap itself is non-zero.
	valueLen, valueCap := 0, 0
	switch v.Kind() {
	case reflect.Array, reflect.Slice, reflect.Chan:
		valueLen, valueCap = v.Len(), v.Cap()
	case reflect.Map, reflect.String:
		valueLen = v.Len()
	}
	if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
		d.w.Write(openParenBytes)
		if valueLen != 0 {
			d.w.Write(lenEqualsBytes)
			printInt(d.w, int64(valueLen), 10)
		}
		if !d.cs.DisableCapacities && valueCap != 0 {
			if valueLen != 0 {
				d.w.Write(spaceBytes)
			}
			d.w.Write(capEqualsBytes)
			printInt(d.w, int64(valueCap), 10)
		}
		d.w.Write(closeParenBytes)
		d.w.Write(spaceBytes)
	}

	// Call Stringer/error interfaces if they exist and the handle methods flag
	// is enabled.
	if !d.cs.DisableMethods {
		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
			if handled := handleMethods(d.cs, d.w, v); handled {
				return
			}
		}
	}

	switch kind {
	case reflect.Invalid:
		// Do nothing. We should never get here since invalid has already
		// been handled above.

	case reflect.Bool:
		printBool(d.w, v.Bool())

	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		printInt(d.w, v.Int(), 10)

	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		printUint(d.w, v.Uint(), 10)

	case reflect.Float32:
		printFloat(d.w, v.Float(), 32)

	case reflect.Float64:
		printFloat(d.w, v.Float(), 64)

	case reflect.Complex64:
		printComplex(d.w, v.Complex(), 32)

	case reflect.Complex128:
		printComplex(d.w, v.Complex(), 64)

	case reflect.Slice:
		if v.IsNil() {
			d.w.Write(nilAngleBytes)
			break
		}
		fallthrough

	case reflect.Array:
		d.w.Write(openBraceNewlineBytes)
		d.depth++
		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
			d.indent()
			d.w.Write(maxNewlineBytes)
		} else {
			d.dumpSlice(v)
		}
		d.depth--
		d.indent()
		d.w.Write(closeBraceBytes)

	case reflect.String:
		d.w.Write([]byte(strconv.Quote(v.String())))

	case reflect.Interface:
		// The only time we should get here is for nil interfaces due to
		// unpackValue calls.
		if v.IsNil() {
			d.w.Write(nilAngleBytes)
		}

	case reflect.Ptr:
		// Do nothing. We should never get here since pointers have already
		// been handled above.

	case reflect.Map:
		// nil maps should be indicated as different than empty maps
		if v.IsNil() {
			d.w.Write(nilAngleBytes)
			break
		}

		d.w.Write(openBraceNewlineBytes)
		d.depth++
		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
			d.indent()
			d.w.Write(maxNewlineBytes)
		} else {
			numEntries := v.Len()
			keys := v.MapKeys()
			if d.cs.SortKeys {
				sortValues(keys, d.cs)
			}
			for i, key := range keys {
				d.dump(d.unpackValue(key))
				d.w.Write(colonSpaceBytes)
				d.ignoreNextIndent = true
				d.dump(d.unpackValue(v.MapIndex(key)))
				if i < (numEntries - 1) {
					d.w.Write(commaNewlineBytes)
				} else {
					d.w.Write(newlineBytes)
				}
			}
		}
		d.depth--
		d.indent()
		d.w.Write(closeBraceBytes)

	case reflect.Struct:
		d.w.Write(openBraceNewlineBytes)
		d.depth++
		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
			d.indent()
			d.w.Write(maxNewlineBytes)
		} else {
			vt := v.Type()
			numFields := v.NumField()
			for i := 0; i < numFields; i++ {
				d.indent()
				vtf := vt.Field(i)
				d.w.Write([]byte(vtf.Name))
				d.w.Write(colonSpaceBytes)
				d.ignoreNextIndent = true
				d.dump(d.unpackValue(v.Field(i)))
				if i < (numFields - 1) {
					d.w.Write(commaNewlineBytes)
				} else {
					d.w.Write(newlineBytes)
				}
			}
		}
		d.depth--
		d.indent()
		d.w.Write(closeBraceBytes)

	case reflect.Uintptr:
		printHexPtr(d.w, uintptr(v.Uint()))

	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
		printHexPtr(d.w, v.Pointer())

	// There were not any other types at the time this code was written, but
	// fall back to letting the default fmt package handle it in case any new
	// types are added.
	default:
		if v.CanInterface() {
			fmt.Fprintf(d.w, "%v", v.Interface())
		} else {
			fmt.Fprintf(d.w, "%v", v.String())
		}
	}
}

// fdump is a helper function to consolidate the logic from the various public
// methods which take varying writers and config states.
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
	for _, arg := range a {
		if arg == nil {
			w.Write(interfaceBytes)
			w.Write(spaceBytes)
			w.Write(nilAngleBytes)
			w.Write(newlineBytes)
			continue
		}

		d := dumpState{w: w, cs: cs}
		d.pointers = make(map[uintptr]int)
		d.dump(reflect.ValueOf(arg))
		d.w.Write(newlineBytes)
	}
}

// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func Fdump(w io.Writer, a ...interface{}) {
	fdump(&Config, w, a...)
}

// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func Sdump(a ...interface{}) string {
	var buf bytes.Buffer
	fdump(&Config, &buf, a...)
	return buf.String()
}

/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:

	* Pointers are dereferenced and followed
	* Circular data structures are detected and handled properly
	* Custom Stringer/error interfaces are optionally invoked, including
	  on unexported types
	* Custom types which only implement the Stringer/error interfaces via
	  a pointer receiver are optionally invoked when passing non-pointer
	  variables
	* Byte arrays and slices are dumped like the hexdump -C command which
	  includes offsets, byte values in hex, and ASCII output

The configuration options are controlled by an exported package global,
spew.Config. See ConfigState for options documentation.

See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func Dump(a ...interface{}) {
	fdump(&Config, os.Stdout, a...)
}
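To see the hexdump path that dumpSlice takes for byte slices, here is a minimal sketch using the exported entry points defined above (the input string is an arbitrary example, and the import assumes the vendored package's upstream path):

package main

import (
	"bytes"
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	data := []byte("spew example")

	// Fdump routes through fdump with the global Config; byte slices
	// are rendered hexdump -C style with len=/cap= annotations.
	var buf bytes.Buffer
	spew.Fdump(&buf, data)
	fmt.Print(buf.String())

	// Sdump is the string-returning variant built on the same fdump helper.
	fmt.Print(spew.Sdump(data))
}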
419 vendor/github.com/davecgh/go-spew/spew/format.go generated vendored Normal file
@@ -0,0 +1,419 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
	"bytes"
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

// supportedFlags is a list of all the character flags supported by fmt package.
const supportedFlags = "0-+# "

// formatState implements the fmt.Formatter interface and contains information
// about the state of a formatting operation. The NewFormatter function can
// be used to get a new Formatter which can be used directly as arguments
// in standard fmt package printing calls.
type formatState struct {
	value          interface{}
	fs             fmt.State
	depth          int
	pointers       map[uintptr]int
	ignoreNextType bool
	cs             *ConfigState
}

// buildDefaultFormat recreates the original format string without precision
// and width information to pass in to fmt.Sprintf in the case of an
// unrecognized type. Unless new types are added to the language, this
// function won't ever be called.
func (f *formatState) buildDefaultFormat() (format string) {
	buf := bytes.NewBuffer(percentBytes)

	for _, flag := range supportedFlags {
		if f.fs.Flag(int(flag)) {
			buf.WriteRune(flag)
		}
	}

	buf.WriteRune('v')

	format = buf.String()
	return format
}

// constructOrigFormat recreates the original format string including precision
// and width information to pass along to the standard fmt package. This allows
// automatic deferral of all format strings this package doesn't support.
func (f *formatState) constructOrigFormat(verb rune) (format string) {
	buf := bytes.NewBuffer(percentBytes)

	for _, flag := range supportedFlags {
		if f.fs.Flag(int(flag)) {
			buf.WriteRune(flag)
		}
	}

	if width, ok := f.fs.Width(); ok {
		buf.WriteString(strconv.Itoa(width))
	}

	if precision, ok := f.fs.Precision(); ok {
		buf.Write(precisionBytes)
		buf.WriteString(strconv.Itoa(precision))
	}

	buf.WriteRune(verb)

	format = buf.String()
	return format
}

// unpackValue returns values inside of non-nil interfaces when possible and
// ensures that types for values which have been unpacked from an interface
// are displayed when the show types flag is also set.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
	if v.Kind() == reflect.Interface {
		f.ignoreNextType = false
		if !v.IsNil() {
			v = v.Elem()
		}
	}
	return v
}

// formatPtr handles formatting of pointers by indirecting them as necessary.
func (f *formatState) formatPtr(v reflect.Value) {
	// Display nil if top level pointer is nil.
	showTypes := f.fs.Flag('#')
	if v.IsNil() && (!showTypes || f.ignoreNextType) {
		f.fs.Write(nilAngleBytes)
		return
	}

	// Remove pointers at or below the current depth from map used to detect
	// circular refs.
	for k, depth := range f.pointers {
		if depth >= f.depth {
			delete(f.pointers, k)
		}
	}

	// Keep list of all dereferenced pointers to possibly show later.
	pointerChain := make([]uintptr, 0)

	// Figure out how many levels of indirection there are by dereferencing
	// pointers and unpacking interfaces down the chain while detecting circular
	// references.
	nilFound := false
	cycleFound := false
	indirects := 0
	ve := v
	for ve.Kind() == reflect.Ptr {
		if ve.IsNil() {
			nilFound = true
			break
		}
		indirects++
		addr := ve.Pointer()
		pointerChain = append(pointerChain, addr)
		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
			cycleFound = true
			indirects--
			break
		}
		f.pointers[addr] = f.depth

		ve = ve.Elem()
		if ve.Kind() == reflect.Interface {
			if ve.IsNil() {
				nilFound = true
				break
			}
			ve = ve.Elem()
		}
	}

	// Display type or indirection level depending on flags.
	if showTypes && !f.ignoreNextType {
		f.fs.Write(openParenBytes)
		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
		f.fs.Write([]byte(ve.Type().String()))
		f.fs.Write(closeParenBytes)
	} else {
		if nilFound || cycleFound {
			indirects += strings.Count(ve.Type().String(), "*")
		}
		f.fs.Write(openAngleBytes)
		f.fs.Write([]byte(strings.Repeat("*", indirects)))
		f.fs.Write(closeAngleBytes)
	}

	// Display pointer information depending on flags.
	if f.fs.Flag('+') && (len(pointerChain) > 0) {
		f.fs.Write(openParenBytes)
		for i, addr := range pointerChain {
			if i > 0 {
				f.fs.Write(pointerChainBytes)
			}
			printHexPtr(f.fs, addr)
		}
		f.fs.Write(closeParenBytes)
	}

	// Display dereferenced value.
	switch {
	case nilFound:
		f.fs.Write(nilAngleBytes)

	case cycleFound:
		f.fs.Write(circularShortBytes)

	default:
		f.ignoreNextType = true
		f.format(ve)
	}
}

// format is the main workhorse for providing the Formatter interface. It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately. It is a recursive function,
// however circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
	// Handle invalid reflect values immediately.
	kind := v.Kind()
	if kind == reflect.Invalid {
		f.fs.Write(invalidAngleBytes)
		return
	}

	// Handle pointers specially.
	if kind == reflect.Ptr {
		f.formatPtr(v)
		return
	}

	// Print type information unless already handled elsewhere.
	if !f.ignoreNextType && f.fs.Flag('#') {
		f.fs.Write(openParenBytes)
		f.fs.Write([]byte(v.Type().String()))
		f.fs.Write(closeParenBytes)
	}
	f.ignoreNextType = false

	// Call Stringer/error interfaces if they exist and the handle methods
	// flag is enabled.
	if !f.cs.DisableMethods {
		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
			if handled := handleMethods(f.cs, f.fs, v); handled {
				return
			}
		}
	}

	switch kind {
	case reflect.Invalid:
		// Do nothing. We should never get here since invalid has already
		// been handled above.

	case reflect.Bool:
		printBool(f.fs, v.Bool())

	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		printInt(f.fs, v.Int(), 10)

	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		printUint(f.fs, v.Uint(), 10)

	case reflect.Float32:
		printFloat(f.fs, v.Float(), 32)

	case reflect.Float64:
		printFloat(f.fs, v.Float(), 64)

	case reflect.Complex64:
		printComplex(f.fs, v.Complex(), 32)

	case reflect.Complex128:
		printComplex(f.fs, v.Complex(), 64)

	case reflect.Slice:
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
			break
		}
		fallthrough

	case reflect.Array:
		f.fs.Write(openBracketBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			numEntries := v.Len()
			for i := 0; i < numEntries; i++ {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				f.ignoreNextType = true
				f.format(f.unpackValue(v.Index(i)))
			}
		}
		f.depth--
		f.fs.Write(closeBracketBytes)

	case reflect.String:
		f.fs.Write([]byte(v.String()))

	case reflect.Interface:
		// The only time we should get here is for nil interfaces due to
		// unpackValue calls.
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
		}

	case reflect.Ptr:
		// Do nothing. We should never get here since pointers have already
		// been handled above.

	case reflect.Map:
		// nil maps should be indicated as different than empty maps
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
			break
		}

		f.fs.Write(openMapBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			keys := v.MapKeys()
			if f.cs.SortKeys {
				sortValues(keys, f.cs)
			}
			for i, key := range keys {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				f.ignoreNextType = true
				f.format(f.unpackValue(key))
				f.fs.Write(colonBytes)
				f.ignoreNextType = true
				f.format(f.unpackValue(v.MapIndex(key)))
			}
		}
		f.depth--
		f.fs.Write(closeMapBytes)

	case reflect.Struct:
|
||||
numFields := v.NumField()
|
||||
f.fs.Write(openBraceBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
vt := v.Type()
|
||||
for i := 0; i < numFields; i++ {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
vtf := vt.Field(i)
|
||||
if f.fs.Flag('+') || f.fs.Flag('#') {
|
||||
f.fs.Write([]byte(vtf.Name))
|
||||
f.fs.Write(colonBytes)
|
||||
}
|
||||
f.format(f.unpackValue(v.Field(i)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Uintptr:
|
||||
printHexPtr(f.fs, uintptr(v.Uint()))
|
||||
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
printHexPtr(f.fs, v.Pointer())
|
||||
|
||||
// There were not any other types at the time this code was written, but
|
||||
// fall back to letting the default fmt package handle it if any get added.
|
||||
default:
|
||||
format := f.buildDefaultFormat()
|
||||
if v.CanInterface() {
|
||||
fmt.Fprintf(f.fs, format, v.Interface())
|
||||
} else {
|
||||
fmt.Fprintf(f.fs, format, v.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
|
||||
// details.
|
||||
func (f *formatState) Format(fs fmt.State, verb rune) {
|
||||
f.fs = fs
|
||||
|
||||
// Use standard formatting for verbs that are not v.
|
||||
if verb != 'v' {
|
||||
format := f.constructOrigFormat(verb)
|
||||
fmt.Fprintf(fs, format, f.value)
|
||||
return
|
||||
}
|
||||
|
||||
if f.value == nil {
|
||||
if fs.Flag('#') {
|
||||
fs.Write(interfaceBytes)
|
||||
}
|
||||
fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
f.format(reflect.ValueOf(f.value))
|
||||
}
|
||||
|
||||
// newFormatter is a helper function to consolidate the logic from the various
|
||||
// public methods which take varying config states.
|
||||
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
|
||||
fs := &formatState{value: v, cs: cs}
|
||||
fs.pointers = make(map[uintptr]int)
|
||||
return fs
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
Printf, Println, or Fprintf.
|
||||
*/
|
||||
func NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(&Config, v)
|
||||
}
|
||||
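To make the verb handling above concrete, here is a minimal usage sketch (not part of the vendored file; the `config` struct is hypothetical) showing the formatter driven through the standard fmt package:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// config is a hypothetical struct used purely for illustration.
type config struct {
	Name  string
	Ports []int
}

func main() {
	c := &config{Name: "dendrite", Ports: []int{8008, 8448}}
	// %v is the most compact form; %#+v adds both type annotations
	// and pointer addresses, as documented above.
	fmt.Printf("%v\n", spew.NewFormatter(c))
	fmt.Printf("%#+v\n", spew.NewFormatter(c))
}
```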
148 vendor/github.com/davecgh/go-spew/spew/spew.go generated vendored Normal file
@@ -0,0 +1,148 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
	"fmt"
	"io"
)

// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the formatted string as a value that satisfies error. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Errorf(format string, a ...interface{}) (err error) {
	return fmt.Errorf(format, convertArgs(a)...)
}

// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprint(w, convertArgs(a)...)
}

// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
	return fmt.Fprintf(w, format, convertArgs(a)...)
}

// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprintln(w, convertArgs(a)...)
}

// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
func Print(a ...interface{}) (n int, err error) {
	return fmt.Print(convertArgs(a)...)
}

// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Printf(format string, a ...interface{}) (n int, err error) {
	return fmt.Printf(format, convertArgs(a)...)
}

// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
func Println(a ...interface{}) (n int, err error) {
	return fmt.Println(convertArgs(a)...)
}

// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprint(a ...interface{}) string {
	return fmt.Sprint(convertArgs(a)...)
}

// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintf(format string, a ...interface{}) string {
	return fmt.Sprintf(format, convertArgs(a)...)
}

// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintln(a ...interface{}) string {
	return fmt.Sprintln(convertArgs(a)...)
}

// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a default spew Formatter interface.
func convertArgs(args []interface{}) (formatters []interface{}) {
	formatters = make([]interface{}, len(args))
	for index, arg := range args {
		formatters[index] = NewFormatter(arg)
	}
	return formatters
}
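The "shorthand" claim in the doc comments above can be checked directly: each wrapper is byte-for-byte equivalent to the explicit NewFormatter form, because convertArgs wraps every argument. A small sketch (editorial, not part of the vendored file):

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	m := map[string]int{"a": 1}
	// The wrapper and the explicit form produce identical output.
	s1 := spew.Sprintf("%v", m)
	s2 := fmt.Sprintf("%v", spew.NewFormatter(m))
	fmt.Println(s1 == s2) // true
}
```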
22 vendor/github.com/eapache/go-resiliency/LICENSE generated vendored Normal file
@@ -0,0 +1,22 @@
The MIT License (MIT)

Copyright (c) 2014 Evan Huus

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
34 vendor/github.com/eapache/go-resiliency/breaker/README.md generated vendored Normal file
@@ -0,0 +1,34 @@
circuit-breaker
===============

[Build Status](https://travis-ci.org/eapache/go-resiliency)
[GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker)
[Code of Conduct](https://eapache.github.io/conduct.html)

The circuit-breaker resiliency pattern for golang.

Creating a breaker takes three parameters:
- error threshold (for opening the breaker)
- success threshold (for closing the breaker)
- timeout (how long to keep the breaker open)

```go
b := breaker.New(3, 1, 5*time.Second)

for {
	result := b.Run(func() error {
		// communicate with some external service and
		// return an error if the communication failed
		return nil
	})

	switch result {
	case nil:
		// success!
	case breaker.ErrBreakerOpen:
		// our function wasn't run because the breaker was open
	default:
		// some other error
	}
}
```
161 vendor/github.com/eapache/go-resiliency/breaker/breaker.go generated vendored Normal file
@@ -0,0 +1,161 @@
// Package breaker implements the circuit-breaker resiliency pattern for Go.
package breaker

import (
	"errors"
	"sync"
	"sync/atomic"
	"time"
)

// ErrBreakerOpen is the error returned from Run() when the function is not executed
// because the breaker is currently open.
var ErrBreakerOpen = errors.New("circuit breaker is open")

const (
	closed uint32 = iota
	open
	halfOpen
)

// Breaker implements the circuit-breaker resiliency pattern
type Breaker struct {
	errorThreshold, successThreshold int
	timeout                          time.Duration

	lock              sync.Mutex
	state             uint32
	errors, successes int
	lastError         time.Time
}

// New constructs a new circuit-breaker that starts closed.
// From closed, the breaker opens if "errorThreshold" errors are seen
// without an error-free period of at least "timeout". From open, the
// breaker half-closes after "timeout". From half-open, the breaker closes
// after "successThreshold" consecutive successes, or opens on a single error.
func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker {
	return &Breaker{
		errorThreshold:   errorThreshold,
		successThreshold: successThreshold,
		timeout:          timeout,
	}
}

// Run will either return ErrBreakerOpen immediately if the circuit-breaker is
// already open, or it will run the given function and pass along its return
// value. It is safe to call Run concurrently on the same Breaker.
func (b *Breaker) Run(work func() error) error {
	state := atomic.LoadUint32(&b.state)

	if state == open {
		return ErrBreakerOpen
	}

	return b.doWork(state, work)
}

// Go will either return ErrBreakerOpen immediately if the circuit-breaker is
// already open, or it will run the given function in a separate goroutine.
// If the function is run, Go will return nil immediately, and will *not* return
// the return value of the function. It is safe to call Go concurrently on the
// same Breaker.
func (b *Breaker) Go(work func() error) error {
	state := atomic.LoadUint32(&b.state)

	if state == open {
		return ErrBreakerOpen
	}

	// errcheck complains about ignoring the error return value, but
	// that's on purpose; if you want an error from a goroutine you have to
	// get it over a channel or something
	go b.doWork(state, work)

	return nil
}

func (b *Breaker) doWork(state uint32, work func() error) error {
	var panicValue interface{}

	result := func() error {
		defer func() {
			panicValue = recover()
		}()
		return work()
	}()

	if result == nil && panicValue == nil && state == closed {
		// short-circuit the normal, success path without contending
		// on the lock
		return nil
	}

	// oh well, I guess we have to contend on the lock
	b.processResult(result, panicValue)

	if panicValue != nil {
		// as close as Go lets us come to a "rethrow" although unfortunately
		// we lose the original panicking location
		panic(panicValue)
	}

	return result
}

func (b *Breaker) processResult(result error, panicValue interface{}) {
	b.lock.Lock()
	defer b.lock.Unlock()

	if result == nil && panicValue == nil {
		if b.state == halfOpen {
			b.successes++
			if b.successes == b.successThreshold {
				b.closeBreaker()
			}
		}
	} else {
		if b.errors > 0 {
			expiry := b.lastError.Add(b.timeout)
			if time.Now().After(expiry) {
				b.errors = 0
			}
		}

		switch b.state {
		case closed:
			b.errors++
			if b.errors == b.errorThreshold {
				b.openBreaker()
			} else {
				b.lastError = time.Now()
			}
		case halfOpen:
			b.openBreaker()
		}
	}
}

func (b *Breaker) openBreaker() {
	b.changeState(open)
	go b.timer()
}

func (b *Breaker) closeBreaker() {
	b.changeState(closed)
}

func (b *Breaker) timer() {
	time.Sleep(b.timeout)

	b.lock.Lock()
	defer b.lock.Unlock()

	b.changeState(halfOpen)
}

func (b *Breaker) changeState(newState uint32) {
	b.errors = 0
	b.successes = 0
	atomic.StoreUint32(&b.state, newState)
}
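The comment on Go above notes that its work function's error is deliberately dropped; if the caller wants it, it has to travel over a channel. A minimal sketch of that pattern (editorial, using only the Breaker API defined above):

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/eapache/go-resiliency/breaker"
)

func main() {
	b := breaker.New(3, 1, 5*time.Second)

	// Go returns immediately and discards the work's error, so the
	// closure reports it over a buffered channel instead.
	errc := make(chan error, 1)
	_ = b.Go(func() error {
		err := errors.New("upstream unavailable")
		errc <- err
		return err // still returned so the breaker records the failure
	})

	fmt.Println(<-errc)
}
```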
21 vendor/github.com/eapache/go-xerial-snappy/LICENSE generated vendored Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2016 Evan Huus

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
13 vendor/github.com/eapache/go-xerial-snappy/README.md generated vendored Normal file
@@ -0,0 +1,13 @@
# go-xerial-snappy

[Build Status](https://travis-ci.org/eapache/go-xerial-snappy)

Xerial-compatible Snappy framing support for golang.

Packages using Xerial for snappy encoding use a framing format incompatible with
basically everything else in existence. This package wraps Go's built-in snappy
package to support it.

Apps that use this format include Apache Kafka (see
https://github.com/dpkp/kafka-python/issues/126#issuecomment-35478921 for
details).
43 vendor/github.com/eapache/go-xerial-snappy/snappy.go generated vendored Normal file
@@ -0,0 +1,43 @@
package snappy

import (
	"bytes"
	"encoding/binary"

	master "github.com/golang/snappy"
)

var xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0}

// Encode encodes data as snappy with no framing header.
func Encode(src []byte) []byte {
	return master.Encode(nil, src)
}

// Decode decodes snappy data whether it is traditional unframed
// or includes the xerial framing format.
func Decode(src []byte) ([]byte, error) {
	if !bytes.Equal(src[:8], xerialHeader) {
		return master.Decode(nil, src)
	}

	var (
		pos   = uint32(16)
		max   = uint32(len(src))
		dst   = make([]byte, 0, len(src))
		chunk []byte
		err   error
	)
	for pos < max {
		size := binary.BigEndian.Uint32(src[pos : pos+4])
		pos += 4

		chunk, err = master.Decode(chunk, src[pos:pos+size])
		if err != nil {
			return nil, err
		}
		pos += size
		dst = append(dst, chunk...)
	}
	return dst, nil
}
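Since Decode handles both unframed snappy and the xerial framing, the raw output of Encode round-trips through it directly. A short editorial sketch of that round trip:

```go
package main

import (
	"bytes"
	"fmt"

	snappy "github.com/eapache/go-xerial-snappy"
)

func main() {
	src := []byte("hello, kafka")
	enc := snappy.Encode(src) // raw snappy, no xerial framing header
	dec, err := snappy.Decode(enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(src, dec)) // true
}
```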
21 vendor/github.com/eapache/queue/LICENSE generated vendored Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Evan Huus

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
16 vendor/github.com/eapache/queue/README.md generated vendored Normal file
@@ -0,0 +1,16 @@
Queue
=====

[Build Status](https://travis-ci.org/eapache/queue)
[GoDoc](https://godoc.org/github.com/eapache/queue)
[Code of Conduct](https://eapache.github.io/conduct.html)

A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki.
Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
substantial memory and time benefits, and fewer GC pauses.

The queue implemented here is as fast as it is in part because it is *not* thread-safe.

Follows semantic versioning using https://gopkg.in/ - import from
[`gopkg.in/eapache/queue.v1`](https://gopkg.in/eapache/queue.v1)
for guaranteed API stability.
102 vendor/github.com/eapache/queue/queue.go generated vendored Normal file
@@ -0,0 +1,102 @@
/*
Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki.
Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
substantial memory and time benefits, and fewer GC pauses.

The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe.
*/
package queue

// minQueueLen is the smallest capacity that the queue may have.
// Must be a power of 2 for bitwise modulus: x % n == x & (n - 1).
const minQueueLen = 16

// Queue represents a single instance of the queue data structure.
type Queue struct {
	buf               []interface{}
	head, tail, count int
}

// New constructs and returns a new Queue.
func New() *Queue {
	return &Queue{
		buf: make([]interface{}, minQueueLen),
	}
}

// Length returns the number of elements currently stored in the queue.
func (q *Queue) Length() int {
	return q.count
}

// resize resizes the queue to fit exactly twice its current contents.
// This can result in shrinking if the queue is less than half-full.
func (q *Queue) resize() {
	newBuf := make([]interface{}, q.count<<1)

	if q.tail > q.head {
		copy(newBuf, q.buf[q.head:q.tail])
	} else {
		n := copy(newBuf, q.buf[q.head:])
		copy(newBuf[n:], q.buf[:q.tail])
	}

	q.head = 0
	q.tail = q.count
	q.buf = newBuf
}

// Add puts an element on the end of the queue.
func (q *Queue) Add(elem interface{}) {
	if q.count == len(q.buf) {
		q.resize()
	}

	q.buf[q.tail] = elem
	// bitwise modulus
	q.tail = (q.tail + 1) & (len(q.buf) - 1)
	q.count++
}

// Peek returns the element at the head of the queue. This call panics
// if the queue is empty.
func (q *Queue) Peek() interface{} {
	if q.count <= 0 {
		panic("queue: Peek() called on empty queue")
	}
	return q.buf[q.head]
}

// Get returns the element at index i in the queue. If the index is
// invalid, the call will panic. This method accepts both positive and
// negative index values. Index 0 refers to the first element, and
// index -1 refers to the last.
func (q *Queue) Get(i int) interface{} {
	// If indexing backwards, convert to positive index.
	if i < 0 {
		i += q.count
	}
	if i < 0 || i >= q.count {
		panic("queue: Get() called with index out of range")
	}
	// bitwise modulus
	return q.buf[(q.head+i)&(len(q.buf)-1)]
}

// Remove removes and returns the element from the front of the queue. If the
// queue is empty, the call will panic.
func (q *Queue) Remove() interface{} {
	if q.count <= 0 {
		panic("queue: Remove() called on empty queue")
	}
	ret := q.buf[q.head]
	q.buf[q.head] = nil
	// bitwise modulus
	q.head = (q.head + 1) & (len(q.buf) - 1)
	q.count--
	// Resize down if buffer 1/4 full.
	if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) {
		q.resize()
	}
	return ret
}
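A short editorial sketch of the queue API defined above, including the negative indexing supported by Get (the vendored path is used here; the README recommends the gopkg.in path for API stability):

```go
package main

import (
	"fmt"

	"github.com/eapache/queue"
)

func main() {
	q := queue.New()
	for i := 0; i < 3; i++ {
		q.Add(i)
	}
	fmt.Println(q.Length()) // 3
	fmt.Println(q.Peek())   // 0
	fmt.Println(q.Get(-1))  // 2 (negative indices count from the back)
	fmt.Println(q.Remove()) // 0
}
```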
3 vendor/github.com/golang/protobuf/AUTHORS generated vendored Normal file
@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.
3 vendor/github.com/golang/protobuf/CONTRIBUTORS generated vendored Normal file
@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
31 vendor/github.com/golang/protobuf/LICENSE generated vendored Normal file
@@ -0,0 +1,31 @@
Go support for Protocol Buffers - Google's data interchange format

Copyright 2010 The Go Authors. All rights reserved.
https://github.com/golang/protobuf

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
    * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 vendor/github.com/golang/protobuf/proto/Makefile generated vendored Normal file
@@ -0,0 +1,43 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

install:
	go install

test: install generate-test-pbs
	go test


generate-test-pbs:
	make install
	make -C testdata
	protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
	make
229 vendor/github.com/golang/protobuf/proto/clone.go generated vendored Normal file
@@ -0,0 +1,229 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Protocol buffer deep copy and merge.
// TODO: RawMessage.

package proto

import (
	"log"
	"reflect"
	"strings"
)

// Clone returns a deep copy of a protocol buffer.
func Clone(pb Message) Message {
	in := reflect.ValueOf(pb)
	if in.IsNil() {
		return pb
	}

	out := reflect.New(in.Type().Elem())
	// out is empty so a merge is a deep copy.
	mergeStruct(out.Elem(), in.Elem())
	return out.Interface().(Message)
}

// Merge merges src into dst.
// Required and optional fields that are set in src will be set to that value in dst.
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
	in := reflect.ValueOf(src)
	out := reflect.ValueOf(dst)
	if out.IsNil() {
		panic("proto: nil destination")
	}
	if in.Type() != out.Type() {
		// Explicit test prior to mergeStruct so that mistyped nils will fail
		panic("proto: type mismatch")
	}
	if in.IsNil() {
		// Merging nil into non-nil is a quiet no-op
		return
	}
	mergeStruct(out.Elem(), in.Elem())
}

func mergeStruct(out, in reflect.Value) {
	sprop := GetProperties(in.Type())
	for i := 0; i < in.NumField(); i++ {
		f := in.Type().Field(i)
		if strings.HasPrefix(f.Name, "XXX_") {
			continue
		}
		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
	}

	if emIn, ok := extendable(in.Addr().Interface()); ok {
		emOut, _ := extendable(out.Addr().Interface())
		mIn, muIn := emIn.extensionsRead()
		if mIn != nil {
			mOut := emOut.extensionsWrite()
			muIn.Lock()
			mergeExtension(mOut, mIn)
			muIn.Unlock()
		}
	}

	uf := in.FieldByName("XXX_unrecognized")
	if !uf.IsValid() {
		return
	}
	uin := uf.Bytes()
	if len(uin) > 0 {
		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
	}
}

// mergeAny performs a merge between two values of the same type.
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
// prop is set if this is a struct field (it may be nil).
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
	if in.Type() == protoMessageType {
		if !in.IsNil() {
			if out.IsNil() {
				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
			} else {
				Merge(out.Interface().(Message), in.Interface().(Message))
			}
		}
		return
	}
	switch in.Kind() {
	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
		reflect.String, reflect.Uint32, reflect.Uint64:
		if !viaPtr && isProto3Zero(in) {
			return
		}
		out.Set(in)
	case reflect.Interface:
		// Probably a oneof field; copy non-nil values.
		if in.IsNil() {
			return
		}
		// Allocate destination if it is not set, or set to a different type.
		// Otherwise we will merge as normal.
		if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
			out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
		}
		mergeAny(out.Elem(), in.Elem(), false, nil)
	case reflect.Map:
		if in.Len() == 0 {
			return
		}
		if out.IsNil() {
			out.Set(reflect.MakeMap(in.Type()))
		}
		// For maps with value types of *T or []byte we need to deep copy each value.
		elemKind := in.Type().Elem().Kind()
		for _, key := range in.MapKeys() {
			var val reflect.Value
			switch elemKind {
			case reflect.Ptr:
				val = reflect.New(in.Type().Elem().Elem())
				mergeAny(val, in.MapIndex(key), false, nil)
			case reflect.Slice:
				val = in.MapIndex(key)
				val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
			default:
				val = in.MapIndex(key)
			}
			out.SetMapIndex(key, val)
		}
	case reflect.Ptr:
		if in.IsNil() {
			return
		}
		if out.IsNil() {
			out.Set(reflect.New(in.Elem().Type()))
		}
		mergeAny(out.Elem(), in.Elem(), true, nil)
	case reflect.Slice:
		if in.IsNil() {
			return
		}
		if in.Type().Elem().Kind() == reflect.Uint8 {
			// []byte is a scalar bytes field, not a repeated field.

			// Edge case: if this is in a proto3 message, a zero length
			// bytes field is considered the zero value, and should not
			// be merged.
			if prop != nil && prop.proto3 && in.Len() == 0 {
				return
			}

			// Make a deep copy.
			// Append to []byte{} instead of []byte(nil) so that we never end up
			// with a nil result.
			out.SetBytes(append([]byte{}, in.Bytes()...))
			return
		}
		n := in.Len()
		if out.IsNil() {
			out.Set(reflect.MakeSlice(in.Type(), 0, n))
		}
		switch in.Type().Elem().Kind() {
		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
			reflect.String, reflect.Uint32, reflect.Uint64:
			out.Set(reflect.AppendSlice(out, in))
		default:
			for i := 0; i < n; i++ {
				x := reflect.Indirect(reflect.New(in.Type().Elem()))
				mergeAny(x, in.Index(i), false, nil)
				out.Set(reflect.Append(out, x))
			}
		}
	case reflect.Struct:
		mergeStruct(out, in)
	default:
		// unknown type, so not a protocol buffer
		log.Printf("proto: don't know how to copy %v", in)
	}
}

func mergeExtension(out, in map[int32]Extension) {
	for extNum, eIn := range in {
		eOut := Extension{desc: eIn.desc}
		if eIn.value != nil {
			v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
			mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
			eOut.value = v.Interface()
		}
		if eIn.enc != nil {
			eOut.enc = make([]byte, len(eIn.enc))
			copy(eOut.enc, eIn.enc)
		}

		out[extNum] = eOut
	}
}
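Clone's "deep copy" and Merge's "overwrite scalars, append repeated fields" semantics are easiest to see with a generated message. A hedged editorial sketch follows; the `pb.Event` type and its import path are hypothetical stand-ins for any protoc-generated message:

```go
package main

import (
	"fmt"

	pb "example.com/hypothetical/eventpb" // hypothetical generated package
	"github.com/golang/protobuf/proto"
)

func main() {
	src := &pb.Event{Name: proto.String("join")} // hypothetical message type
	// Clone returns a deep copy; mutating the copy leaves src untouched.
	dup := proto.Clone(src).(*pb.Event)
	dup.Name = proto.String("leave")
	fmt.Println(src.GetName(), dup.GetName()) // join leave

	// Merge sets scalar fields that are set in the source onto the
	// destination, so src picks up dup's Name.
	proto.Merge(src, dup)
	fmt.Println(src.GetName()) // leave
}
```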
970 vendor/github.com/golang/protobuf/proto/decode.go generated vendored Normal file
@@ -0,0 +1,970 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto

/*
 * Routines for decoding protocol buffer data to construct in-memory representations.
 */

import (
	"errors"
	"fmt"
	"io"
	"os"
	"reflect"
)

// errOverflow is returned when an integer is too large to be represented.
var errOverflow = errors.New("proto: integer overflow")

// ErrInternalBadWireType is returned by generated code when an incorrect
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")

// The fundamental decoders that interpret bytes on the wire.
// Those that take integer types all return uint64 and are
// therefore of type valueDecoder.

// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(buf) {
			return 0, 0
		}
		b := uint64(buf[n])
		n++
		x |= (b & 0x7F) << shift
		if (b & 0x80) == 0 {
			return x, n
		}
	}

	// The number is too large to represent in a 64-bit value.
	return 0, 0
}
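As a quick worked example of the varint format described above, the value 300 encodes as the two bytes 0xAC 0x02: the low 7 bits come first, and the high bit is set on every byte except the last. An editorial sketch using the exported entry point:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 0xAC = 1_0101100: continuation bit set, low 7 bits = 44.
	// 0x02 contributes 2<<7 = 256, so the value is 256 + 44 = 300.
	buf := []byte{0xAC, 0x02}
	x, n := proto.DecodeVarint(buf)
	fmt.Println(x, n) // 300 2
}
```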
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
	i := p.index
	l := len(p.buf)

	for shift := uint(0); shift < 64; shift += 7 {
		if i >= l {
			err = io.ErrUnexpectedEOF
			return
		}
		b := p.buf[i]
		i++
		x |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			p.index = i
			return
		}
	}

	// The number is too large to represent in a 64-bit value.
	err = errOverflow
	return
}

// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
	i := p.index
	buf := p.buf

	if i >= len(buf) {
		return 0, io.ErrUnexpectedEOF
	} else if buf[i] < 0x80 {
		p.index++
		return uint64(buf[i]), nil
	} else if len(buf)-i < 10 {
		return p.decodeVarintSlow()
	}

	var b uint64
	// we already checked the first byte
	x = uint64(buf[i]) - 0x80
	i++

	b = uint64(buf[i])
	i++
	x += b << 7
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 7

	b = uint64(buf[i])
	i++
	x += b << 14
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 14

	b = uint64(buf[i])
	i++
	x += b << 21
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 21

	b = uint64(buf[i])
	i++
	x += b << 28
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 28

	b = uint64(buf[i])
	i++
	x += b << 35
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 35

	b = uint64(buf[i])
	i++
	x += b << 42
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 42

	b = uint64(buf[i])
	i++
	x += b << 49
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 49

	b = uint64(buf[i])
	i++
	x += b << 56
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 56

	b = uint64(buf[i])
	i++
	x += b << 63
	if b&0x80 == 0 {
		goto done
	}
	// x -= 0x80 << 63 // Always zero.

	return 0, errOverflow

done:
	p.index = i
	return x, nil
}
// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
	// x, err already 0
	i := p.index + 8
	if i < 0 || i > len(p.buf) {
		err = io.ErrUnexpectedEOF
		return
	}
	p.index = i

	x = uint64(p.buf[i-8])
	x |= uint64(p.buf[i-7]) << 8
	x |= uint64(p.buf[i-6]) << 16
	x |= uint64(p.buf[i-5]) << 24
	x |= uint64(p.buf[i-4]) << 32
	x |= uint64(p.buf[i-3]) << 40
	x |= uint64(p.buf[i-2]) << 48
	x |= uint64(p.buf[i-1]) << 56
	return
}

// DecodeFixed32 reads a 32-bit integer from the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
	// x, err already 0
	i := p.index + 4
	if i < 0 || i > len(p.buf) {
		err = io.ErrUnexpectedEOF
		return
	}
	p.index = i

	x = uint64(p.buf[i-4])
	x |= uint64(p.buf[i-3]) << 8
	x |= uint64(p.buf[i-2]) << 16
	x |= uint64(p.buf[i-1]) << 24
	return
}

// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
// from the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
	x, err = p.DecodeVarint()
	if err != nil {
		return
	}
	x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
	return
}

// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
// from the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
	x, err = p.DecodeVarint()
	if err != nil {
		return
	}
	x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
	return
}
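Zigzag encoding maps 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ..., so small negative sint values stay small on the wire; the shift-and-xor in DecodeZigzag64 above inverts that mapping. A worked editorial sketch: the wire value 3 decodes back to -2.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer([]byte{0x03}) // the single-byte varint 3
	x, err := b.DecodeZigzag64()
	if err != nil {
		panic(err)
	}
	// (3>>1) ^ -(3&1) == 1 ^ all-ones == -2 when viewed as int64.
	fmt.Println(int64(x)) // -2
}
```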
|
||||
// These are not ValueDecoders: they produce an array of bytes or a string.
|
||||
// bytes, embedded messages
|
||||
|
||||
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
|
||||
// This is the format used for the bytes protocol buffer
|
||||
// type and for embedded messages.
|
||||
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
|
||||
n, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nb := int(n)
|
||||
if nb < 0 {
|
||||
return nil, fmt.Errorf("proto: bad byte length %d", nb)
|
||||
}
|
||||
end := p.index + nb
|
||||
if end < p.index || end > len(p.buf) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
if !alloc {
|
||||
// todo: check if can get more uses of alloc=false
|
||||
buf = p.buf[p.index:end]
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
buf = make([]byte, nb)
|
||||
copy(buf, p.buf[p.index:])
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeStringBytes reads an encoded string from the Buffer.
|
||||
// This is the format used for the proto2 string type.
|
||||
func (p *Buffer) DecodeStringBytes() (s string, err error) {
|
||||
buf, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
|
||||
// If the protocol buffer has extensions, and the field matches, add it as an extension.
|
||||
// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
|
||||
func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
|
||||
oi := o.index
|
||||
|
||||
err := o.skip(t, tag, wire)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !unrecField.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
ptr := structPointer_Bytes(base, unrecField)
|
||||
|
||||
// Add the skipped field to struct field
|
||||
obuf := o.buf
|
||||
|
||||
o.buf = *ptr
|
||||
o.EncodeVarint(uint64(tag<<3 | wire))
|
||||
*ptr = append(o.buf, obuf[oi:o.index]...)
|
||||
|
||||
o.buf = obuf
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
|
||||
func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
|
||||
|
||||
var u uint64
|
||||
var err error
|
||||
|
||||
switch wire {
|
||||
case WireVarint:
|
||||
_, err = o.DecodeVarint()
|
||||
case WireFixed64:
|
||||
_, err = o.DecodeFixed64()
|
||||
case WireBytes:
|
||||
_, err = o.DecodeRawBytes(false)
|
||||
case WireFixed32:
|
||||
_, err = o.DecodeFixed32()
|
||||
case WireStartGroup:
|
||||
for {
|
||||
u, err = o.DecodeVarint()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
fwire := int(u & 0x7)
|
||||
if fwire == WireEndGroup {
|
||||
break
|
||||
}
|
||||
ftag := int(u >> 3)
|
||||
err = o.skip(t, ftag, fwire)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The method should reset the receiver before
|
||||
// decoding starts. The argument points to data that may be
|
||||
// overwritten, so implementations should not keep references to the
|
||||
// buffer.
|
||||
type Unmarshaler interface {
|
||||
Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in buf and places the
|
||||
// decoded result in pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// Unmarshal resets pb before starting to unmarshal, so any
|
||||
// existing data in pb is always removed. Use UnmarshalMerge
|
||||
// to preserve and append to existing data.
|
||||
func Unmarshal(buf []byte, pb Message) error {
|
||||
pb.Reset()
|
||||
return UnmarshalMerge(buf, pb)
|
||||
}
|
||||
|
||||
// UnmarshalMerge parses the protocol buffer representation in buf and
|
||||
// writes the decoded result to pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// UnmarshalMerge merges into existing data in pb.
|
||||
// Most code should use Unmarshal instead.
|
||||
func UnmarshalMerge(buf []byte, pb Message) error {
|
||||
// If the object can unmarshal itself, let it.
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
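// A sketch of the merge semantics (pb.Test is a hypothetical generated type):
// fields already set in msg survive unless data also sets them.
//
//	msg := &pb.Test{Label: proto.String("keep me")}
//	err := proto.UnmarshalMerge(data, msg)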
|
||||
|
||||
// DecodeMessage reads a count-delimited message from the Buffer.
|
||||
func (p *Buffer) DecodeMessage(pb Message) error {
|
||||
enc, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return NewBuffer(enc).Unmarshal(pb)
|
||||
}
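// Assumed usage: draining a stream of count-delimited messages, where each
// message is preceded by its varint-encoded byte length (pb.Test is a
// hypothetical generated type).
//
//	buf := proto.NewBuffer(stream)
//	for {
//		msg := new(pb.Test)
//		if err := buf.DecodeMessage(msg); err != nil {
//			break // end of buffer or malformed input
//		}
//		// use msg
//	}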
|
||||
|
||||
// DecodeGroup reads a tag-delimited group from the Buffer.
|
||||
func (p *Buffer) DecodeGroup(pb Message) error {
|
||||
typ, base, err := getbase(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in the
|
||||
// Buffer and places the decoded result in pb. If the struct
|
||||
// underlying pb does not match the data in the buffer, the results can be
|
||||
// unpredictable.
|
||||
//
|
||||
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
|
||||
func (p *Buffer) Unmarshal(pb Message) error {
|
||||
// If the object can unmarshal itself, let it.
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
err := u.Unmarshal(p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
|
||||
typ, base, err := getbase(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
|
||||
|
||||
if collectStats {
|
||||
stats.Decode++
|
||||
}
|
||||
|
||||
return err
|
||||
}
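// A sketch contrasting this with the package-level Unmarshal: the Buffer
// method merges into pb rather than resetting it first (pb.Test is a
// hypothetical generated type).
//
//	msg := &pb.Test{Label: proto.String("old")}
//	err := proto.NewBuffer(data).Unmarshal(msg) // msg.Label kept unless data sets it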
|
||||
|
||||
// unmarshalType does the work of unmarshaling a structure.
|
||||
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
|
||||
var state errorState
|
||||
required, reqFields := prop.reqCount, uint64(0)
|
||||
|
||||
var err error
|
||||
for err == nil && o.index < len(o.buf) {
|
||||
oi := o.index
|
||||
var u uint64
|
||||
u, err = o.DecodeVarint()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
wire := int(u & 0x7)
|
||||
if wire == WireEndGroup {
|
||||
if is_group {
|
||||
if required > 0 {
|
||||
// Not enough information to determine the exact field.
|
||||
// (See below.)
|
||||
return &RequiredNotSetError{"{Unknown}"}
|
||||
}
|
||||
return nil // input is satisfied
|
||||
}
|
||||
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
|
||||
}
|
||||
tag := int(u >> 3)
|
||||
if tag <= 0 {
|
||||
return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
|
||||
}
|
||||
fieldnum, ok := prop.decoderTags.get(tag)
|
||||
if !ok {
|
||||
// Maybe it's an extension?
|
||||
if prop.extendable {
|
||||
if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
|
||||
if err = o.skip(st, tag, wire); err == nil {
|
||||
extmap := e.extensionsWrite()
|
||||
ext := extmap[int32(tag)] // may be missing
|
||||
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
|
||||
extmap[int32(tag)] = ext
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Maybe it's a oneof?
|
||||
if prop.oneofUnmarshaler != nil {
|
||||
m := structPointer_Interface(base, st).(Message)
|
||||
// First return value indicates whether tag is a oneof field.
|
||||
ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
|
||||
if err == ErrInternalBadWireType {
|
||||
// Map the error to something more descriptive.
|
||||
// Do the formatting here to save generated code space.
|
||||
err = fmt.Errorf("bad wiretype for oneof field in %T", m)
|
||||
}
|
||||
if ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
|
||||
continue
|
||||
}
|
||||
p := prop.Prop[fieldnum]
|
||||
|
||||
if p.dec == nil {
|
||||
fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
|
||||
continue
|
||||
}
|
||||
dec := p.dec
|
||||
if wire != WireStartGroup && wire != p.WireType {
|
||||
if wire == WireBytes && p.packedDec != nil {
|
||||
// a packable field
|
||||
dec = p.packedDec
|
||||
} else {
|
||||
err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
|
||||
continue
|
||||
}
|
||||
}
|
||||
decErr := dec(o, p, base)
|
||||
if decErr != nil && !state.shouldContinue(decErr, p) {
|
||||
err = decErr
|
||||
}
|
||||
if err == nil && p.Required {
|
||||
// Successfully decoded a required field.
|
||||
if tag <= 64 {
|
||||
// use bitmap for fields 1-64 to catch field reuse.
|
||||
var mask uint64 = 1 << uint64(tag-1)
|
||||
if reqFields&mask == 0 {
|
||||
// new required field
|
||||
reqFields |= mask
|
||||
required--
|
||||
}
|
||||
} else {
|
||||
// This is imprecise. It can be fooled by a required field
|
||||
// with a tag > 64 that is encoded twice; that's very rare.
|
||||
// A fully correct implementation would require allocating
|
||||
// a data structure, which we would like to avoid.
|
||||
required--
|
||||
}
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
if is_group {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if state.err != nil {
|
||||
return state.err
|
||||
}
|
||||
if required > 0 {
|
||||
// Not enough information to determine the exact field. If we use extra
|
||||
// CPU, we could determine the field only if the missing required field
|
||||
// has a tag <= 64 and we check reqFields.
|
||||
return &RequiredNotSetError{"{Unknown}"}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Individual type decoders
|
||||
// For each,
|
||||
// u is the decoded value,
|
||||
// v is a pointer to the field (pointer) in the struct
|
||||
|
||||
// Sizes of the pools to allocate inside the Buffer.
|
||||
// The goal is modest amortization and allocation
|
||||
// on at least 16-byte boundaries.
|
||||
const (
|
||||
boolPoolSize = 16
|
||||
uint32PoolSize = 8
|
||||
uint64PoolSize = 4
|
||||
)
|
||||
|
||||
// Decode a bool.
|
||||
func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(o.bools) == 0 {
|
||||
o.bools = make([]bool, boolPoolSize)
|
||||
}
|
||||
o.bools[0] = u != 0
|
||||
*structPointer_Bool(base, p.field) = &o.bools[0]
|
||||
o.bools = o.bools[1:]
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*structPointer_BoolVal(base, p.field) = u != 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode an int32.
|
||||
func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode an int64.
|
||||
func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word64_Set(structPointer_Word64(base, p.field), o, u)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a string.
|
||||
func (o *Buffer) dec_string(p *Properties, base structPointer) error {
|
||||
s, err := o.DecodeStringBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*structPointer_String(base, p.field) = &s
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
|
||||
s, err := o.DecodeStringBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*structPointer_StringVal(base, p.field) = s
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of bytes ([]byte).
|
||||
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
|
||||
b, err := o.DecodeRawBytes(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*structPointer_Bytes(base, p.field) = b
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of bools ([]bool).
|
||||
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v := structPointer_BoolSlice(base, p.field)
|
||||
*v = append(*v, u != 0)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of bools ([]bool) in packed format.
|
||||
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
|
||||
v := structPointer_BoolSlice(base, p.field)
|
||||
|
||||
nn, err := o.DecodeVarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nb := int(nn) // number of bytes of encoded bools
|
||||
fin := o.index + nb
|
||||
if fin < o.index {
|
||||
return errOverflow
|
||||
}
|
||||
|
||||
y := *v
|
||||
for o.index < fin {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
y = append(y, u != 0)
|
||||
}
|
||||
|
||||
*v = y
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int32s ([]int32).
|
||||
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
structPointer_Word32Slice(base, p.field).Append(uint32(u))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int32s ([]int32) in packed format.
|
||||
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
|
||||
v := structPointer_Word32Slice(base, p.field)
|
||||
|
||||
nn, err := o.DecodeVarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nb := int(nn) // number of bytes of encoded int32s
|
||||
|
||||
fin := o.index + nb
|
||||
if fin < o.index {
|
||||
return errOverflow
|
||||
}
|
||||
for o.index < fin {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Append(uint32(u))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int64s ([]int64).
|
||||
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
structPointer_Word64Slice(base, p.field).Append(u)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int64s ([]int64) in packed format.
|
||||
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
|
||||
v := structPointer_Word64Slice(base, p.field)
|
||||
|
||||
nn, err := o.DecodeVarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nb := int(nn) // number of bytes of encoded int64s
|
||||
|
||||
fin := o.index + nb
|
||||
if fin < o.index {
|
||||
return errOverflow
|
||||
}
|
||||
for o.index < fin {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Append(u)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of strings ([]string).
|
||||
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
|
||||
s, err := o.DecodeStringBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v := structPointer_StringSlice(base, p.field)
|
||||
*v = append(*v, s)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of slice of bytes ([][]byte).
|
||||
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
|
||||
b, err := o.DecodeRawBytes(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v := structPointer_BytesSlice(base, p.field)
|
||||
*v = append(*v, b)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a map field.
|
||||
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
|
||||
raw, err := o.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
oi := o.index // index at the end of this map entry
|
||||
o.index -= len(raw) // move buffer back to start of map entry
|
||||
|
||||
mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
|
||||
if mptr.Elem().IsNil() {
|
||||
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
|
||||
}
|
||||
v := mptr.Elem() // map[K]V
|
||||
|
||||
// Prepare addressable doubly-indirect placeholders for the key and value types.
|
||||
// See enc_new_map for why.
|
||||
keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
|
||||
keybase := toStructPointer(keyptr.Addr()) // **K
|
||||
|
||||
var valbase structPointer
|
||||
var valptr reflect.Value
|
||||
switch p.mtype.Elem().Kind() {
|
||||
case reflect.Slice:
|
||||
// []byte
|
||||
var dummy []byte
|
||||
valptr = reflect.ValueOf(&dummy) // *[]byte
|
||||
valbase = toStructPointer(valptr) // *[]byte
|
||||
case reflect.Ptr:
|
||||
// message; valptr is **Msg; need to allocate the intermediate pointer
|
||||
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
|
||||
valptr.Set(reflect.New(valptr.Type().Elem()))
|
||||
valbase = toStructPointer(valptr)
|
||||
default:
|
||||
// everything else
|
||||
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
|
||||
valbase = toStructPointer(valptr.Addr()) // **V
|
||||
}
|
||||
|
||||
// Decode.
|
||||
// This parses a restricted wire format, namely the encoding of a message
|
||||
// with two fields. See enc_new_map for the format.
|
||||
for o.index < oi {
|
||||
// tagcodes for key and value properties are always a single byte
|
||||
// because they have tags 1 and 2.
|
||||
tagcode := o.buf[o.index]
|
||||
o.index++
|
||||
switch tagcode {
|
||||
case p.mkeyprop.tagcode[0]:
|
||||
if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
|
||||
return err
|
||||
}
|
||||
case p.mvalprop.tagcode[0]:
|
||||
if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
// TODO: Should we silently skip this instead?
|
||||
return fmt.Errorf("proto: bad map data tag %d", raw[0])
|
||||
}
|
||||
}
|
||||
keyelem, valelem := keyptr.Elem(), valptr.Elem()
|
||||
if !keyelem.IsValid() {
|
||||
keyelem = reflect.Zero(p.mtype.Key())
|
||||
}
|
||||
if !valelem.IsValid() {
|
||||
valelem = reflect.Zero(p.mtype.Elem())
|
||||
}
|
||||
|
||||
v.SetMapIndex(keyelem, valelem)
|
||||
return nil
|
||||
}
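// For reference, a sketch of the restricted wire format parsed above: a
// map<string, int32> entry {"a": 1} is an ordinary embedded message whose
// key is field 1 and whose value is field 2, so its payload bytes are
//
//	0x0a 0x01 'a'  // tag 1, wire type 2 (bytes): length 1, key "a"
//	0x10 0x01      // tag 2, wire type 0 (varint): value 1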
|
||||
|
||||
// Decode a group.
|
||||
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
|
||||
bas := structPointer_GetStructPointer(base, p.field)
|
||||
if structPointer_IsNil(bas) {
|
||||
// allocate new nested message
|
||||
bas = toStructPointer(reflect.New(p.stype))
|
||||
structPointer_SetStructPointer(base, p.field, bas)
|
||||
}
|
||||
return o.unmarshalType(p.stype, p.sprop, true, bas)
|
||||
}
|
||||
|
||||
// Decode an embedded message.
|
||||
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
|
||||
raw, e := o.DecodeRawBytes(false)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
|
||||
bas := structPointer_GetStructPointer(base, p.field)
|
||||
if structPointer_IsNil(bas) {
|
||||
// allocate new nested message
|
||||
bas = toStructPointer(reflect.New(p.stype))
|
||||
structPointer_SetStructPointer(base, p.field, bas)
|
||||
}
|
||||
|
||||
// If the object can unmarshal itself, let it.
|
||||
if p.isUnmarshaler {
|
||||
iv := structPointer_Interface(bas, p.stype)
|
||||
return iv.(Unmarshaler).Unmarshal(raw)
|
||||
}
|
||||
|
||||
obuf := o.buf
|
||||
oi := o.index
|
||||
o.buf = raw
|
||||
o.index = 0
|
||||
|
||||
err = o.unmarshalType(p.stype, p.sprop, false, bas)
|
||||
o.buf = obuf
|
||||
o.index = oi
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode a slice of embedded messages.
|
||||
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
|
||||
return o.dec_slice_struct(p, false, base)
|
||||
}
|
||||
|
||||
// Decode a slice of embedded groups.
|
||||
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
|
||||
return o.dec_slice_struct(p, true, base)
|
||||
}
|
||||
|
||||
// Decode a slice of structs ([]*struct).
|
||||
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
|
||||
v := reflect.New(p.stype)
|
||||
bas := toStructPointer(v)
|
||||
structPointer_StructPointerSlice(base, p.field).Append(bas)
|
||||
|
||||
if is_group {
|
||||
err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
|
||||
return err
|
||||
}
|
||||
|
||||
raw, err := o.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the object can unmarshal itself, let it.
|
||||
if p.isUnmarshaler {
|
||||
iv := v.Interface()
|
||||
return iv.(Unmarshaler).Unmarshal(raw)
|
||||
}
|
||||
|
||||
obuf := o.buf
|
||||
oi := o.index
|
||||
o.buf = raw
|
||||
o.index = 0
|
||||
|
||||
err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
|
||||
|
||||
o.buf = obuf
|
||||
o.index = oi
|
||||
|
||||
return err
|
||||
}
|
||||
1362 vendor/github.com/golang/protobuf/proto/encode.go generated vendored Normal file
File diff suppressed because it is too large
300 vendor/github.com/golang/protobuf/proto/equal.go generated vendored Normal file
@@ -0,0 +1,300 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer comparison.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
/*
|
||||
Equal returns true iff protocol buffers a and b are equal.
|
||||
The arguments must both be pointers to protocol buffer structs.
|
||||
|
||||
Equality is defined in this way:
|
||||
- Two messages are equal iff they are the same type,
|
||||
corresponding fields are equal, unknown field sets
|
||||
are equal, and extensions sets are equal.
|
||||
- Two set scalar fields are equal iff their values are equal.
|
||||
If the fields are of a floating-point type, remember that
|
||||
NaN != x for all x, including NaN. If the message is defined
|
||||
in a proto3 .proto file, fields are not "set"; specifically,
|
||||
zero length proto3 "bytes" fields are equal (nil == {}).
|
||||
- Two repeated fields are equal iff their lengths are the same,
|
||||
and their corresponding elements are equal. Note a "bytes" field,
|
||||
although represented by []byte, is not a repeated field and the
|
||||
rule for the scalar fields described above applies.
|
||||
- Two unset fields are equal.
|
||||
- Two unknown field sets are equal if their current
|
||||
encoded state is equal.
|
||||
- Two extension sets are equal iff they have corresponding
|
||||
elements that are pairwise equal.
|
||||
- Two map fields are equal iff their lengths are the same,
|
||||
and they contain the same set of elements. Zero-length map
|
||||
fields are equal.
|
||||
  - Every other combination of things is not equal.
|
||||
|
||||
The return value is undefined if a and b are not protocol buffers.
|
||||
*/
|
||||
func Equal(a, b Message) bool {
|
||||
if a == nil || b == nil {
|
||||
return a == b
|
||||
}
|
||||
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
|
||||
if v1.Type() != v2.Type() {
|
||||
return false
|
||||
}
|
||||
if v1.Kind() == reflect.Ptr {
|
||||
if v1.IsNil() {
|
||||
return v2.IsNil()
|
||||
}
|
||||
if v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
v1, v2 = v1.Elem(), v2.Elem()
|
||||
}
|
||||
if v1.Kind() != reflect.Struct {
|
||||
return false
|
||||
}
|
||||
return equalStruct(v1, v2)
|
||||
}
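// Assumed usage (pb.Test is a hypothetical generated message type):
//
//	a := &pb.Test{Label: proto.String("x")}
//	b := &pb.Test{Label: proto.String("x")}
//	proto.Equal(a, b) // true: same type and equal fields
//	a == b            // false: pointer identity, not message equality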
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
func equalStruct(v1, v2 reflect.Value) bool {
|
||||
sprop := GetProperties(v1.Type())
|
||||
for i := 0; i < v1.NumField(); i++ {
|
||||
f := v1.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
f1, f2 := v1.Field(i), v2.Field(i)
|
||||
if f.Type.Kind() == reflect.Ptr {
|
||||
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
|
||||
// both unset
|
||||
continue
|
||||
} else if n1 != n2 {
|
||||
// set/unset mismatch
|
||||
return false
|
||||
}
|
||||
b1, ok := f1.Interface().(raw)
|
||||
if ok {
|
||||
b2 := f2.Interface().(raw)
|
||||
// RawMessage
|
||||
if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
|
||||
return false
|
||||
}
|
||||
continue
|
||||
}
|
||||
f1, f2 = f1.Elem(), f2.Elem()
|
||||
}
|
||||
if !equalAny(f1, f2, sprop.Prop[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_InternalExtensions")
|
||||
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_extensions")
|
||||
if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
uf := v1.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return true
|
||||
}
|
||||
|
||||
u1 := uf.Bytes()
|
||||
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
|
||||
if !bytes.Equal(u1, u2) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
// prop may be nil.
|
||||
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
|
||||
if v1.Type() == protoMessageType {
|
||||
m1, _ := v1.Interface().(Message)
|
||||
m2, _ := v2.Interface().(Message)
|
||||
return Equal(m1, m2)
|
||||
}
|
||||
switch v1.Kind() {
|
||||
case reflect.Bool:
|
||||
return v1.Bool() == v2.Bool()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v1.Float() == v2.Float()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v1.Int() == v2.Int()
|
||||
case reflect.Interface:
|
||||
// Probably a oneof field; compare the inner values.
|
||||
n1, n2 := v1.IsNil(), v2.IsNil()
|
||||
if n1 || n2 {
|
||||
return n1 == n2
|
||||
}
|
||||
e1, e2 := v1.Elem(), v2.Elem()
|
||||
if e1.Type() != e2.Type() {
|
||||
return false
|
||||
}
|
||||
return equalAny(e1, e2, nil)
|
||||
case reflect.Map:
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for _, key := range v1.MapKeys() {
|
||||
val2 := v2.MapIndex(key)
|
||||
if !val2.IsValid() {
|
||||
// This key was not found in the second map.
|
||||
return false
|
||||
}
|
||||
if !equalAny(v1.MapIndex(key), val2, nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.Ptr:
|
||||
// Maps may have nil values in them, so check for nil.
|
||||
if v1.IsNil() && v2.IsNil() {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return equalAny(v1.Elem(), v2.Elem(), prop)
|
||||
case reflect.Slice:
|
||||
if v1.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// short circuit: []byte
|
||||
|
||||
// Edge case: if this is in a proto3 message, a zero length
|
||||
// bytes field is considered the zero value.
|
||||
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
|
||||
}
|
||||
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < v1.Len(); i++ {
|
||||
if !equalAny(v1.Index(i), v2.Index(i), prop) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.String:
|
||||
return v1.Interface().(string) == v2.Interface().(string)
|
||||
case reflect.Struct:
|
||||
return equalStruct(v1, v2)
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v1.Uint() == v2.Uint()
|
||||
}
|
||||
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to compare %v", v1)
|
||||
return false
|
||||
}
|
||||
|
||||
// base is the struct type that the extensions are based on.
|
||||
// x1 and x2 are InternalExtensions.
|
||||
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
|
||||
em1, _ := x1.extensionsRead()
|
||||
em2, _ := x2.extensionsRead()
|
||||
return equalExtMap(base, em1, em2)
|
||||
}
|
||||
|
||||
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
||||
if len(em1) != len(em2) {
|
||||
return false
|
||||
}
|
||||
|
||||
for extNum, e1 := range em1 {
|
||||
e2, ok := em2[extNum]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
m1, m2 := e1.value, e2.value
|
||||
|
||||
if m1 != nil && m2 != nil {
|
||||
// Both are unencoded.
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// At least one is encoded. To do a semantically correct comparison
|
||||
// we need to unmarshal them first.
|
||||
var desc *ExtensionDesc
|
||||
if m := extensionMaps[base]; m != nil {
|
||||
desc = m[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
|
||||
continue
|
||||
}
|
||||
var err error
|
||||
if m1 == nil {
|
||||
m1, err = decodeExtension(e1.enc, desc)
|
||||
}
|
||||
if m2 == nil && err == nil {
|
||||
m2, err = decodeExtension(e2.enc, desc)
|
||||
}
|
||||
if err != nil {
|
||||
// The encoded form is invalid.
|
||||
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
|
||||
return false
|
||||
}
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
587 vendor/github.com/golang/protobuf/proto/extensions.go generated vendored Normal file
@@ -0,0 +1,587 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Types and routines for supporting protocol buffer extensions.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
|
||||
var ErrMissingExtension = errors.New("proto: missing extension")
|
||||
|
||||
// ExtensionRange represents a range of message extensions for a protocol buffer.
|
||||
// Used in code generated by the protocol compiler.
|
||||
type ExtensionRange struct {
|
||||
Start, End int32 // both inclusive
|
||||
}
|
||||
|
||||
// extendableProto is an interface implemented by any protocol buffer generated by the current
|
||||
// proto compiler that may be extended.
|
||||
type extendableProto interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
extensionsWrite() map[int32]Extension
|
||||
extensionsRead() (map[int32]Extension, sync.Locker)
|
||||
}
|
||||
|
||||
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
|
||||
// version of the proto compiler that may be extended.
|
||||
type extendableProtoV1 interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
ExtensionMap() map[int32]Extension
|
||||
}
|
||||
|
||||
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
|
||||
type extensionAdapter struct {
|
||||
extendableProtoV1
|
||||
}
|
||||
|
||||
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
|
||||
return e.ExtensionMap()
|
||||
}
|
||||
|
||||
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||
return e.ExtensionMap(), notLocker{}
|
||||
}
|
||||
|
||||
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
|
||||
type notLocker struct{}
|
||||
|
||||
func (n notLocker) Lock() {}
|
||||
func (n notLocker) Unlock() {}
|
||||
|
||||
// extendable returns the extendableProto interface for the given generated proto message.
|
||||
// If the proto message has the old extension format, it returns a wrapper that implements
|
||||
// the extendableProto interface.
|
||||
func extendable(p interface{}) (extendableProto, bool) {
|
||||
if ep, ok := p.(extendableProto); ok {
|
||||
return ep, ok
|
||||
}
|
||||
if ep, ok := p.(extendableProtoV1); ok {
|
||||
return extensionAdapter{ep}, ok
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// XXX_InternalExtensions is an internal representation of proto extensions.
|
||||
//
|
||||
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
|
||||
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
|
||||
//
|
||||
// The methods of XXX_InternalExtensions are not concurrency safe in general,
|
||||
// but calls to logically read-only methods such as has and get may be executed concurrently.
|
||||
type XXX_InternalExtensions struct {
|
||||
// The struct must be indirect so that if a user inadvertently copies a
|
||||
// generated message and its embedded XXX_InternalExtensions, they
|
||||
// avoid the mayhem of a copied mutex.
|
||||
//
|
||||
// The mutex serializes all logically read-only operations to p.extensionMap.
|
||||
// It is up to the client to ensure that write operations to p.extensionMap are
|
||||
// mutually exclusive with other accesses.
|
||||
p *struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
}
|
||||
}
|
||||
|
||||
// extensionsWrite returns the extension map, creating it on first use.
|
||||
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
|
||||
if e.p == nil {
|
||||
e.p = new(struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
})
|
||||
e.p.extensionMap = make(map[int32]Extension)
|
||||
}
|
||||
return e.p.extensionMap
|
||||
}
|
||||
|
||||
// extensionsRead returns the extensions map for read-only use. It may be nil.
|
||||
// The caller must hold the returned mutex's lock when accessing elements within the map.
|
||||
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||
if e.p == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return e.p.extensionMap, &e.p.mu
|
||||
}
|
||||
|
||||
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
|
||||
var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
|
||||
|
||||
// ExtensionDesc represents an extension specification.
|
||||
// Used in generated code from the protocol compiler.
|
||||
type ExtensionDesc struct {
|
||||
ExtendedType Message // nil pointer to the type that is being extended
|
||||
ExtensionType interface{} // nil pointer to the extension type
|
||||
Field int32 // field number
|
||||
Name string // fully-qualified name of extension, for text formatting
|
||||
Tag string // protobuf tag style
|
||||
Filename string // name of the file in which the extension is defined
|
||||
}
|
||||
|
||||
func (ed *ExtensionDesc) repeated() bool {
|
||||
t := reflect.TypeOf(ed.ExtensionType)
|
||||
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
|
||||
}
|
||||
|
||||
// Extension represents an extension in a message.
|
||||
type Extension struct {
|
||||
// When an extension is stored in a message using SetExtension
|
||||
// only desc and value are set. When the message is marshaled
|
||||
// enc will be set to the encoded form of the message.
|
||||
//
|
||||
// When a message is unmarshaled and contains extensions, each
|
||||
// extension will have only enc set. When such an extension is
|
||||
// accessed using GetExtension (or GetExtensions) desc and value
|
||||
// will be set.
|
||||
desc *ExtensionDesc
|
||||
value interface{}
|
||||
enc []byte
|
||||
}
|
||||
|
||||
// SetRawExtension is for testing only.
|
||||
func SetRawExtension(base Message, id int32, b []byte) {
|
||||
epb, ok := extendable(base)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
extmap := epb.extensionsWrite()
|
||||
extmap[id] = Extension{enc: b}
|
||||
}
|
||||
|
||||
// isExtensionField returns true iff the given field number is in an extension range.
|
||||
func isExtensionField(pb extendableProto, field int32) bool {
|
||||
for _, er := range pb.ExtensionRangeArray() {
|
||||
if er.Start <= field && field <= er.End {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// checkExtensionTypes checks that the given extension is valid for pb.
|
||||
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
|
||||
var pbi interface{} = pb
|
||||
// Check the extended type.
|
||||
if ea, ok := pbi.(extensionAdapter); ok {
|
||||
pbi = ea.extendableProtoV1
|
||||
}
|
||||
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
|
||||
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
|
||||
}
|
||||
// Check the range.
|
||||
if !isExtensionField(pb, extension.Field) {
|
||||
return errors.New("proto: bad extension number; not in declared ranges")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// extPropKey is sufficient to uniquely identify an extension.
|
||||
type extPropKey struct {
|
||||
base reflect.Type
|
||||
field int32
|
||||
}
|
||||
|
||||
var extProp = struct {
|
||||
sync.RWMutex
|
||||
m map[extPropKey]*Properties
|
||||
}{
|
||||
m: make(map[extPropKey]*Properties),
|
||||
}
|
||||
|
||||
func extensionProperties(ed *ExtensionDesc) *Properties {
|
||||
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
|
||||
|
||||
extProp.RLock()
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
extProp.RUnlock()
|
||||
return prop
|
||||
}
|
||||
extProp.RUnlock()
|
||||
|
||||
extProp.Lock()
|
||||
defer extProp.Unlock()
|
||||
// Check again.
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
return prop
|
||||
}
|
||||
|
||||
prop := new(Properties)
|
||||
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
|
||||
extProp.m[key] = prop
|
||||
return prop
|
||||
}
|
||||
|
||||
// encode encodes any unmarshaled (unencoded) extensions in e.
|
||||
func encodeExtensions(e *XXX_InternalExtensions) error {
|
||||
m, mu := e.extensionsRead()
|
||||
if m == nil {
|
||||
return nil // fast path
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return encodeExtensionsMap(m)
|
||||
}
|
||||
|
||||
// encode encodes any unmarshaled (unencoded) extensions in e.
|
||||
func encodeExtensionsMap(m map[int32]Extension) error {
|
||||
for k, e := range m {
|
||||
if e.value == nil || e.desc == nil {
|
||||
// Extension is only in its encoded form.
|
||||
continue
|
||||
}
|
||||
|
||||
// We don't skip extensions that have an encoded form set,
|
||||
// because the extension value may have been mutated after
|
||||
// the last time this function was called.
|
||||
|
||||
et := reflect.TypeOf(e.desc.ExtensionType)
|
||||
props := extensionProperties(e.desc)
|
||||
|
||||
p := NewBuffer(nil)
|
||||
// If e.value has type T, the encoder expects a *struct{ X T }.
|
||||
// Pass a *T with a zero field and hope it all works out.
|
||||
x := reflect.New(et)
|
||||
x.Elem().Set(reflect.ValueOf(e.value))
|
||||
if err := props.enc(p, props, toStructPointer(x)); err != nil {
|
||||
return err
|
||||
}
|
||||
e.enc = p.buf
|
||||
m[k] = e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func extensionsSize(e *XXX_InternalExtensions) (n int) {
|
||||
m, mu := e.extensionsRead()
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return extensionsMapSize(m)
|
||||
}
|
||||
|
||||
func extensionsMapSize(m map[int32]Extension) (n int) {
|
||||
for _, e := range m {
|
||||
if e.value == nil || e.desc == nil {
|
||||
// Extension is only in its encoded form.
|
||||
n += len(e.enc)
|
||||
continue
|
||||
}
|
||||
|
||||
// We don't skip extensions that have an encoded form set,
|
||||
// because the extension value may have been mutated after
|
||||
// the last time this function was called.
|
||||
|
||||
et := reflect.TypeOf(e.desc.ExtensionType)
|
||||
props := extensionProperties(e.desc)
|
||||
|
||||
// If e.value has type T, the encoder expects a *struct{ X T }.
|
||||
// Pass a *T with a zero field and hope it all works out.
|
||||
x := reflect.New(et)
|
||||
x.Elem().Set(reflect.ValueOf(e.value))
|
||||
n += props.size(props, toStructPointer(x))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// HasExtension returns whether the given extension is present in pb.
|
||||
func HasExtension(pb Message, extension *ExtensionDesc) bool {
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
epb, ok := extendable(pb)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
extmap, mu := epb.extensionsRead()
|
||||
if extmap == nil {
|
||||
return false
|
||||
}
|
||||
mu.Lock()
|
||||
_, ok = extmap[extension.Field]
|
||||
mu.Unlock()
|
||||
return ok
|
||||
}
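// Assumed usage, with pb.E_Hypothetical standing in for a generated
// *ExtensionDesc:
//
//	if proto.HasExtension(msg, pb.E_Hypothetical) {
//		// the extension field is present in msg
//	}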
|
||||
|
||||
// ClearExtension removes the given extension from pb.
|
||||
func ClearExtension(pb Message, extension *ExtensionDesc) {
|
||||
epb, ok := extendable(pb)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
extmap := epb.extensionsWrite()
|
||||
delete(extmap, extension.Field)
|
||||
}
|
||||
|
||||
// GetExtension parses and returns the given extension of pb.
|
||||
// If the extension is not present and has no default value it returns ErrMissingExtension.
|
||||
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
|
||||
epb, ok := extendable(pb)
|
||||
if !ok {
|
||||
return nil, errors.New("proto: not an extendable proto")
|
||||
}
|
||||
|
||||
if err := checkExtensionTypes(epb, extension); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
emap, mu := epb.extensionsRead()
|
||||
if emap == nil {
|
||||
return defaultExtensionValue(extension)
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
e, ok := emap[extension.Field]
|
||||
if !ok {
|
||||
// defaultExtensionValue returns the default value or
|
||||
// ErrMissingExtension if there is no default.
|
||||
return defaultExtensionValue(extension)
|
||||
}
|
||||
|
||||
if e.value != nil {
|
||||
// Already decoded. Check the descriptor, though.
|
||||
if e.desc != extension {
|
||||
// This shouldn't happen. If it does, it means that
|
||||
// GetExtension was called twice with two different
|
||||
// descriptors with the same field number.
|
||||
return nil, errors.New("proto: descriptor conflict")
|
||||
}
|
||||
return e.value, nil
|
||||
}
|
||||
|
||||
v, err := decodeExtension(e.enc, extension)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remember the decoded version and drop the encoded version.
|
||||
// That way it is safe to mutate what we return.
|
||||
e.value = v
|
||||
e.desc = extension
|
||||
e.enc = nil
|
||||
emap[extension.Field] = e
|
||||
return e.value, nil
|
||||
}
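// A sketch of the lookup path, using the hypothetical generated descriptor
// pb.E_Hypothetical whose ExtensionType is (*string)(nil):
//
//	v, err := proto.GetExtension(msg, pb.E_Hypothetical)
//	switch {
//	case err == proto.ErrMissingExtension:
//		// extension not set, and no default declared
//	case err != nil:
//		// malformed encoding or descriptor mismatch
//	default:
//		s := *v.(*string) // concrete type matches the descriptor's ExtensionType
//		_ = s
//	}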
|
||||
|
||||
// defaultExtensionValue returns the default value for extension.
|
||||
// If no default for an extension is defined ErrMissingExtension is returned.
|
||||
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
|
||||
t := reflect.TypeOf(extension.ExtensionType)
|
||||
props := extensionProperties(extension)
|
||||
|
||||
sf, _, err := fieldDefault(t, props)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if sf == nil || sf.value == nil {
|
||||
// There is no default value.
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
|
||||
if t.Kind() != reflect.Ptr {
|
||||
// We do not need to return a Ptr; we can return sf.value directly.
|
||||
return sf.value, nil
|
||||
}
|
||||
|
||||
// We need to return an interface{} that is a pointer to sf.value.
|
||||
value := reflect.New(t).Elem()
|
||||
value.Set(reflect.New(value.Type().Elem()))
|
||||
if sf.kind == reflect.Int32 {
|
||||
// We may have an int32 or an enum, but the underlying data is int32.
|
||||
// Since we can't set an int32 into a non-int32 reflect.Value directly,
|
||||
// set it as an int32.
|
||||
value.Elem().SetInt(int64(sf.value.(int32)))
|
||||
} else {
|
||||
value.Elem().Set(reflect.ValueOf(sf.value))
|
||||
}
|
||||
return value.Interface(), nil
|
||||
}
|
||||
|
||||
// decodeExtension decodes an extension encoded in b.
|
||||
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
|
||||
o := NewBuffer(b)
|
||||
|
||||
t := reflect.TypeOf(extension.ExtensionType)
|
||||
|
||||
props := extensionProperties(extension)
|
||||
|
||||
// t is a pointer to a struct, a pointer to a basic type, or a slice.
|
||||
// Allocate a "field" to store the pointer/slice itself; the
|
||||
// pointer/slice will be stored here. We pass
|
||||
// the address of this field to props.dec.
|
||||
// This passes a zero field and a *t and lets props.dec
|
||||
// interpret it as a *struct{ x t }.
|
||||
value := reflect.New(t).Elem()
|
||||
|
||||
for {
|
||||
// Discard wire type and field number varint. It isn't needed.
|
||||
if _, err := o.DecodeVarint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if o.index >= len(o.buf) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return value.Interface(), nil
|
||||
}
|
||||
|
||||
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
|
||||
// The returned slice has the same length as es; missing extensions will appear as nil elements.
|
||||
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
|
||||
epb, ok := extendable(pb)
|
||||
if !ok {
|
||||
return nil, errors.New("proto: not an extendable proto")
|
||||
}
|
||||
extensions = make([]interface{}, len(es))
|
||||
for i, e := range es {
|
||||
extensions[i], err = GetExtension(epb, e)
|
||||
if err == ErrMissingExtension {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
|
||||
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
|
||||
// just the Field field, which defines the extension's field number.
|
||||
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
|
||||
epb, ok := extendable(pb)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
|
||||
}
|
||||
registeredExtensions := RegisteredExtensions(pb)
|
||||
|
||||
emap, mu := epb.extensionsRead()
|
||||
if emap == nil {
|
||||
return nil, nil
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
extensions := make([]*ExtensionDesc, 0, len(emap))
|
||||
for extid, e := range emap {
|
||||
desc := e.desc
|
||||
if desc == nil {
|
||||
desc = registeredExtensions[extid]
|
||||
if desc == nil {
|
||||
desc = &ExtensionDesc{Field: extid}
|
||||
}
|
||||
}
|
||||
|
||||
extensions = append(extensions, desc)
|
||||
}
|
||||
return extensions, nil
|
||||
}
|
||||
|
||||
// SetExtension sets the specified extension of pb to the specified value.
|
||||
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
|
||||
epb, ok := extendable(pb)
|
||||
if !ok {
|
||||
return errors.New("proto: not an extendable proto")
|
||||
}
|
||||
if err := checkExtensionTypes(epb, extension); err != nil {
|
||||
return err
|
||||
}
|
||||
typ := reflect.TypeOf(extension.ExtensionType)
|
||||
if typ != reflect.TypeOf(value) {
|
||||
return errors.New("proto: bad extension value type")
|
||||
}
|
||||
// nil extension values need to be caught early, because the
|
||||
// encoder can't distinguish an ErrNil due to a nil extension
|
||||
// from an ErrNil due to a missing field. Extensions are
|
||||
// always optional, so the encoder would just swallow the error
|
||||
// and drop all the extensions from the encoded message.
|
||||
if reflect.ValueOf(value).IsNil() {
|
||||
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
|
||||
}
|
||||
|
||||
extmap := epb.extensionsWrite()
|
||||
extmap[extension.Field] = Extension{desc: extension, value: value}
|
||||
return nil
|
||||
}
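// Assumed usage; the value's dynamic type must exactly match the descriptor's
// ExtensionType (*string here, for the hypothetical pb.E_Hypothetical):
//
//	if err := proto.SetExtension(msg, pb.E_Hypothetical, proto.String("hi")); err != nil {
//		log.Fatal(err)
//	}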
|
||||
|
||||
// ClearAllExtensions clears all extensions from pb.
|
||||
func ClearAllExtensions(pb Message) {
|
||||
epb, ok := extendable(pb)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
m := epb.extensionsWrite()
|
||||
for k := range m {
|
||||
delete(m, k)
|
||||
}
|
||||
}
|
||||
|
||||
// A global registry of extensions.
|
||||
// The generated code will register the generated descriptors by calling RegisterExtension.
|
||||
|
||||
var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
|
||||
|
||||
// RegisterExtension is called from the generated code.
|
||||
func RegisterExtension(desc *ExtensionDesc) {
|
||||
st := reflect.TypeOf(desc.ExtendedType).Elem()
|
||||
m := extensionMaps[st]
|
||||
if m == nil {
|
||||
m = make(map[int32]*ExtensionDesc)
|
||||
extensionMaps[st] = m
|
||||
}
|
||||
if _, ok := m[desc.Field]; ok {
|
||||
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
|
||||
}
|
||||
m[desc.Field] = desc
|
||||
}
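// A sketch of the registration emitted by the generator; every identifier
// below is hypothetical:
//
//	var E_Hypothetical = &proto.ExtensionDesc{
//		ExtendedType:  (*pb.Base)(nil),  // nil pointer to the extended type
//		ExtensionType: (*string)(nil),   // nil pointer to the extension type
//		Field:         100,
//		Name:          "example.hypothetical",
//		Tag:           "bytes,100,opt,name=hypothetical",
//	}
//
//	func init() { proto.RegisterExtension(E_Hypothetical) }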
|
||||
|
||||
// RegisteredExtensions returns a map of the registered extensions of a
|
||||
// protocol buffer struct, indexed by the extension number.
|
||||
// The argument pb should be a nil pointer to the struct type.
|
||||
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
|
||||
return extensionMaps[reflect.TypeOf(pb).Elem()]
|
||||
}
|
||||
898 vendor/github.com/golang/protobuf/proto/lib.go generated vendored Normal file
@@ -0,0 +1,898 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
/*
|
||||
Package proto converts data structures to and from the wire format of
|
||||
protocol buffers. It works in concert with the Go source code generated
|
||||
for .proto files by the protocol compiler.
|
||||
|
||||
A summary of the properties of the protocol buffer interface
|
||||
for a protocol buffer variable v:
|
||||
|
||||
- Names are turned from camel_case to CamelCase for export.
|
||||
- There are no methods on v to set fields; just treat
|
||||
them as structure fields.
|
||||
- There are getters that return a field's value if set,
|
||||
and return the field's default value if unset.
|
||||
The getters work even if the receiver is a nil message.
|
||||
- The zero value for a struct is its correct initialization state.
|
||||
All desired fields must be set before marshaling.
|
||||
- A Reset() method will restore a protobuf struct to its zero state.
|
||||
- Non-repeated fields are pointers to the values; nil means unset.
|
||||
That is, optional or required field int32 f becomes F *int32.
|
||||
- Repeated fields are slices.
|
||||
- Helper functions are available to aid the setting of fields.
|
||||
msg.Foo = proto.String("hello") // set field
|
||||
- Constants are defined to hold the default values of all fields that
|
||||
have them. They have the form Default_StructName_FieldName.
|
||||
Because the getter methods handle defaulted values,
|
||||
direct use of these constants should be rare.
|
||||
- Enums are given type names and maps from names to values.
|
||||
Enum values are prefixed by the enclosing message's name, or by the
|
||||
enum's type name if it is a top-level enum. Enum types have a String
|
||||
  method, and an Enum method to assist in message construction.
|
||||
- Nested messages, groups and enums have type names prefixed with the name of
|
||||
the surrounding message type.
|
||||
- Extensions are given descriptor names that start with E_,
|
||||
followed by an underscore-delimited list of the nested messages
|
||||
that contain it (if any) followed by the CamelCased name of the
|
||||
extension field itself. HasExtension, ClearExtension, GetExtension
|
||||
and SetExtension are functions for manipulating extensions.
|
||||
- Oneof field sets are given a single field in their message,
|
||||
with distinguished wrapper types for each possible field value.
|
||||
- Marshal and Unmarshal are functions to encode and decode the wire format.
|
||||
|
||||
When the .proto file specifies `syntax="proto3"`, there are some differences:
|
||||
|
||||
- Non-repeated fields of non-message type are values instead of pointers.
|
||||
- Getters are only generated for message and oneof fields.
|
||||
- Enum types do not get an Enum method.
|
||||
|
||||
The simplest way to describe this is to see an example.
Given file test.proto, containing

	package example;

	enum FOO { X = 17; }

	message Test {
	  required string label = 1;
	  optional int32 type = 2 [default=77];
	  repeated int64 reps = 3;
	  optional group OptionalGroup = 4 {
	    required string RequiredField = 5;
	  }
	  oneof union {
	    int32 number = 6;
	    string name = 7;
	  }
	}

The resulting file, test.pb.go, is:

	package example

	import proto "github.com/golang/protobuf/proto"
	import math "math"

	type FOO int32
	const (
		FOO_X FOO = 17
	)
	var FOO_name = map[int32]string{
		17: "X",
	}
	var FOO_value = map[string]int32{
		"X": 17,
	}

	func (x FOO) Enum() *FOO {
		p := new(FOO)
		*p = x
		return p
	}
	func (x FOO) String() string {
		return proto.EnumName(FOO_name, int32(x))
	}
	func (x *FOO) UnmarshalJSON(data []byte) error {
		value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
		if err != nil {
			return err
		}
		*x = FOO(value)
		return nil
	}

	type Test struct {
		Label         *string             `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
		Type          *int32              `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
		Reps          []int64             `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
		Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
		// Types that are valid to be assigned to Union:
		//	*Test_Number
		//	*Test_Name
		Union            isTest_Union `protobuf_oneof:"union"`
		XXX_unrecognized []byte       `json:"-"`
	}
	func (m *Test) Reset()         { *m = Test{} }
	func (m *Test) String() string { return proto.CompactTextString(m) }
	func (*Test) ProtoMessage()    {}

	type isTest_Union interface {
		isTest_Union()
	}

	type Test_Number struct {
		Number int32 `protobuf:"varint,6,opt,name=number"`
	}
	type Test_Name struct {
		Name string `protobuf:"bytes,7,opt,name=name"`
	}

	func (*Test_Number) isTest_Union() {}
	func (*Test_Name) isTest_Union()   {}

	func (m *Test) GetUnion() isTest_Union {
		if m != nil {
			return m.Union
		}
		return nil
	}
	const Default_Test_Type int32 = 77

	func (m *Test) GetLabel() string {
		if m != nil && m.Label != nil {
			return *m.Label
		}
		return ""
	}

	func (m *Test) GetType() int32 {
		if m != nil && m.Type != nil {
			return *m.Type
		}
		return Default_Test_Type
	}

	func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
		if m != nil {
			return m.Optionalgroup
		}
		return nil
	}

	type Test_OptionalGroup struct {
		RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
	}
	func (m *Test_OptionalGroup) Reset()         { *m = Test_OptionalGroup{} }
	func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }

	func (m *Test_OptionalGroup) GetRequiredField() string {
		if m != nil && m.RequiredField != nil {
			return *m.RequiredField
		}
		return ""
	}

	func (m *Test) GetNumber() int32 {
		if x, ok := m.GetUnion().(*Test_Number); ok {
			return x.Number
		}
		return 0
	}

	func (m *Test) GetName() string {
		if x, ok := m.GetUnion().(*Test_Name); ok {
			return x.Name
		}
		return ""
	}

	func init() {
		proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
	}

To create and play with a Test object:

	package main

	import (
		"log"

		"github.com/golang/protobuf/proto"
		pb "./example.pb"
	)

	func main() {
		test := &pb.Test{
			Label: proto.String("hello"),
			Type:  proto.Int32(17),
			Reps:  []int64{1, 2, 3},
			Optionalgroup: &pb.Test_OptionalGroup{
				RequiredField: proto.String("good bye"),
			},
			Union: &pb.Test_Name{"fred"},
		}
		data, err := proto.Marshal(test)
		if err != nil {
			log.Fatal("marshaling error: ", err)
		}
		newTest := &pb.Test{}
		err = proto.Unmarshal(data, newTest)
		if err != nil {
			log.Fatal("unmarshaling error: ", err)
		}
		// Now test and newTest contain the same data.
		if test.GetLabel() != newTest.GetLabel() {
			log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
		}
		// Use a type switch to determine which oneof was set.
		switch u := test.Union.(type) {
		case *pb.Test_Number: // u.Number contains the number.
		case *pb.Test_Name: // u.Name contains the string.
		}
		// etc.
	}
*/
package proto
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Message is implemented by generated protocol buffer messages.
|
||||
type Message interface {
|
||||
Reset()
|
||||
String() string
|
||||
ProtoMessage()
|
||||
}
|
||||
|
||||
// Stats records allocation details about the protocol buffer encoders
|
||||
// and decoders. Useful for tuning the library itself.
|
||||
type Stats struct {
|
||||
Emalloc uint64 // mallocs in encode
|
||||
Dmalloc uint64 // mallocs in decode
|
||||
Encode uint64 // number of encodes
|
||||
Decode uint64 // number of decodes
|
||||
Chit uint64 // number of cache hits
|
||||
Cmiss uint64 // number of cache misses
|
||||
Size uint64 // number of sizes
|
||||
}
|
||||
|
||||
// Set to true to enable stats collection.
|
||||
const collectStats = false
|
||||
|
||||
var stats Stats
|
||||
|
||||
// GetStats returns a copy of the global Stats structure.
|
||||
func GetStats() Stats { return stats }
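// As a caller-side sketch (collectStats is a compile-time constant, so the
// counters below only move if it is flipped to true and the package is
// rebuilt; msg is a hypothetical proto.Message):
//
//	before := proto.GetStats()
//	_, _ = proto.Marshal(msg)
//	after := proto.GetStats()
//	fmt.Printf("encodes: %d, mallocs: %d\n",
//		after.Encode-before.Encode, after.Emalloc-before.Emalloc)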
|
||||
|
||||
// A Buffer is a buffer manager for marshaling and unmarshaling
|
||||
// protocol buffers. It may be reused between invocations to
|
||||
// reduce memory usage. It is not necessary to use a Buffer;
|
||||
// the global functions Marshal and Unmarshal create a
|
||||
// temporary Buffer and are fine for most applications.
|
||||
type Buffer struct {
|
||||
buf []byte // encode/decode byte stream
|
||||
index int // read point
|
||||
|
||||
// pools of basic types to amortize allocation.
|
||||
bools []bool
|
||||
uint32s []uint32
|
||||
uint64s []uint64
|
||||
|
||||
// extra pools, only used with pointer_reflect.go
|
||||
int32s []int32
|
||||
int64s []int64
|
||||
float32s []float32
|
||||
float64s []float64
|
||||
}
|
||||
|
||||
// NewBuffer allocates a new Buffer and initializes its internal data to
|
||||
// the contents of the argument slice.
|
||||
func NewBuffer(e []byte) *Buffer {
|
||||
return &Buffer{buf: e}
|
||||
}
|
||||
|
||||
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
|
||||
func (p *Buffer) Reset() {
|
||||
p.buf = p.buf[0:0] // for reading/writing
|
||||
p.index = 0 // for reading
|
||||
}
|
||||
|
||||
// SetBuf replaces the internal buffer with the slice,
|
||||
// ready for unmarshaling the contents of the slice.
|
||||
func (p *Buffer) SetBuf(s []byte) {
|
||||
p.buf = s
|
||||
p.index = 0
|
||||
}
|
||||
|
||||
// Bytes returns the contents of the Buffer.
|
||||
func (p *Buffer) Bytes() []byte { return p.buf }
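// A Buffer can be reused across messages to amortize allocations. A
// caller-side sketch (Buffer.Marshal is defined in encode.go; send is a
// hypothetical consumer of the encoded bytes):
//
//	buf := proto.NewBuffer(nil)
//	for _, m := range msgs {
//		buf.Reset()
//		if err := buf.Marshal(m); err != nil {
//			log.Fatal(err)
//		}
//		send(buf.Bytes())
//	}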
|
||||
|
||||
/*
|
||||
* Helper routines for simplifying the creation of optional fields of basic type.
|
||||
*/
|
||||
|
||||
// Bool is a helper routine that allocates a new bool value
|
||||
// to store v and returns a pointer to it.
|
||||
func Bool(v bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int32 is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int32(v int32) *int32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it, but unlike Int32
|
||||
// its argument value is an int.
|
||||
func Int(v int) *int32 {
|
||||
p := new(int32)
|
||||
*p = int32(v)
|
||||
return p
|
||||
}
|
||||
|
||||
// Int64 is a helper routine that allocates a new int64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int64(v int64) *int64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float32 is a helper routine that allocates a new float32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float32(v float32) *float32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float64 is a helper routine that allocates a new float64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float64(v float64) *float64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint32 is a helper routine that allocates a new uint32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint32(v uint32) *uint32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint64 is a helper routine that allocates a new uint64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint64(v uint64) *uint64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// String is a helper routine that allocates a new string value
|
||||
// to store v and returns a pointer to it.
|
||||
func String(v string) *string {
|
||||
return &v
|
||||
}
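// Taken together, these helpers keep proto2 message construction terse,
// since optional scalar fields are pointers. Using the Test message from
// the package example:
//
//	t := &Test{
//		Label: String("hello"),
//		Type:  Int32(17),
//	}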
|
||||
|
||||
// EnumName is a helper function to simplify printing protocol buffer enums
|
||||
// by name. Given an enum map and a value, it returns a useful string.
|
||||
func EnumName(m map[int32]string, v int32) string {
|
||||
s, ok := m[v]
|
||||
if ok {
|
||||
return s
|
||||
}
|
||||
return strconv.Itoa(int(v))
|
||||
}
|
||||
|
||||
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
|
||||
// from their JSON-encoded representation. Given a map from the enum's symbolic
|
||||
// names to its int values, and a byte buffer containing the JSON-encoded
|
||||
// value, it returns an int32 that can be cast to the enum type by the caller.
|
||||
//
|
||||
// The function can deal with both JSON representations, numeric and symbolic.
|
||||
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
|
||||
if data[0] == '"' {
|
||||
// New style: enums are strings.
|
||||
var repr string
|
||||
if err := json.Unmarshal(data, &repr); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
val, ok := m[repr]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
// Old style: enums are ints.
|
||||
var val int32
|
||||
if err := json.Unmarshal(data, &val); err != nil {
|
||||
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
|
||||
}
|
||||
return val, nil
|
||||
}
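// A sketch using the FOO enum from the package example; both JSON
// encodings decode to the same value:
//
//	v1, _ := UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // symbolic form -> 17
//	v2, _ := UnmarshalJSONEnum(FOO_value, []byte("17"), "FOO")  // numeric form  -> 17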
|
||||
|
||||
// DebugPrint dumps the encoded data in b in a debugging format with a header
|
||||
// including the string s. Used in testing but made available for general debugging.
|
||||
func (p *Buffer) DebugPrint(s string, b []byte) {
|
||||
var u uint64
|
||||
|
||||
obuf := p.buf
|
||||
index := p.index
|
||||
p.buf = b
|
||||
p.index = 0
|
||||
depth := 0
|
||||
|
||||
fmt.Printf("\n--- %s ---\n", s)
|
||||
|
||||
out:
|
||||
for {
|
||||
for i := 0; i < depth; i++ {
|
||||
fmt.Print(" ")
|
||||
}
|
||||
|
||||
index := p.index
|
||||
if index == len(p.buf) {
|
||||
break
|
||||
}
|
||||
|
||||
op, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: fetching op err %v\n", index, err)
|
||||
break out
|
||||
}
|
||||
tag := op >> 3
|
||||
wire := op & 7
|
||||
|
||||
switch wire {
|
||||
default:
|
||||
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
|
||||
index, tag, wire)
|
||||
break out
|
||||
|
||||
case WireBytes:
|
||||
var r []byte
|
||||
|
||||
r, err = p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
|
||||
if len(r) <= 6 {
|
||||
for i := 0; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < 3; i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
fmt.Printf(" ..")
|
||||
for i := len(r) - 3; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
case WireFixed32:
|
||||
u, err = p.DecodeFixed32()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
|
||||
|
||||
case WireFixed64:
|
||||
u, err = p.DecodeFixed64()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
|
||||
|
||||
case WireVarint:
|
||||
u, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
|
||||
|
||||
case WireStartGroup:
|
||||
fmt.Printf("%3d: t=%3d start\n", index, tag)
|
||||
depth++
|
||||
|
||||
case WireEndGroup:
|
||||
depth--
|
||||
fmt.Printf("%3d: t=%3d end\n", index, tag)
|
||||
}
|
||||
}
|
||||
|
||||
if depth != 0 {
|
||||
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
p.buf = obuf
|
||||
p.index = index
|
||||
}
|
||||
|
||||
// SetDefaults sets unset protocol buffer fields to their default values.
|
||||
// It only modifies fields that are both unset and have defined defaults.
|
||||
// It recursively sets default values in any non-nil sub-messages.
|
||||
func SetDefaults(pb Message) {
|
||||
setDefaults(reflect.ValueOf(pb), true, false)
|
||||
}
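// A caller-side sketch, using the Test message from the package example
// (its Type field declares [default=77]):
//
//	m := &Test{Label: String("x")} // Type is unset (nil)
//	SetDefaults(m)
//	// *m.Type == 77; Label is left alone because it was already set.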
|
||||
|
||||
// v is a pointer to a struct.
|
||||
func setDefaults(v reflect.Value, recur, zeros bool) {
|
||||
v = v.Elem()
|
||||
|
||||
defaultMu.RLock()
|
||||
dm, ok := defaults[v.Type()]
|
||||
defaultMu.RUnlock()
|
||||
if !ok {
|
||||
dm = buildDefaultMessage(v.Type())
|
||||
defaultMu.Lock()
|
||||
defaults[v.Type()] = dm
|
||||
defaultMu.Unlock()
|
||||
}
|
||||
|
||||
for _, sf := range dm.scalars {
|
||||
f := v.Field(sf.index)
|
||||
if !f.IsNil() {
|
||||
// field already set
|
||||
continue
|
||||
}
|
||||
dv := sf.value
|
||||
if dv == nil && !zeros {
|
||||
// no explicit default, and don't want to set zeros
|
||||
continue
|
||||
}
|
||||
fptr := f.Addr().Interface() // **T
|
||||
// TODO: Consider batching the allocations we do here.
|
||||
switch sf.kind {
|
||||
case reflect.Bool:
|
||||
b := new(bool)
|
||||
if dv != nil {
|
||||
*b = dv.(bool)
|
||||
}
|
||||
*(fptr.(**bool)) = b
|
||||
case reflect.Float32:
|
||||
f := new(float32)
|
||||
if dv != nil {
|
||||
*f = dv.(float32)
|
||||
}
|
||||
*(fptr.(**float32)) = f
|
||||
case reflect.Float64:
|
||||
f := new(float64)
|
||||
if dv != nil {
|
||||
*f = dv.(float64)
|
||||
}
|
||||
*(fptr.(**float64)) = f
|
||||
case reflect.Int32:
|
||||
// might be an enum
|
||||
if ft := f.Type(); ft != int32PtrType {
|
||||
// enum
|
||||
f.Set(reflect.New(ft.Elem()))
|
||||
if dv != nil {
|
||||
f.Elem().SetInt(int64(dv.(int32)))
|
||||
}
|
||||
} else {
|
||||
// int32 field
|
||||
i := new(int32)
|
||||
if dv != nil {
|
||||
*i = dv.(int32)
|
||||
}
|
||||
*(fptr.(**int32)) = i
|
||||
}
|
||||
case reflect.Int64:
|
||||
i := new(int64)
|
||||
if dv != nil {
|
||||
*i = dv.(int64)
|
||||
}
|
||||
*(fptr.(**int64)) = i
|
||||
case reflect.String:
|
||||
s := new(string)
|
||||
if dv != nil {
|
||||
*s = dv.(string)
|
||||
}
|
||||
*(fptr.(**string)) = s
|
||||
case reflect.Uint8:
|
||||
// exceptional case: []byte
|
||||
var b []byte
|
||||
if dv != nil {
|
||||
db := dv.([]byte)
|
||||
b = make([]byte, len(db))
|
||||
copy(b, db)
|
||||
} else {
|
||||
b = []byte{}
|
||||
}
|
||||
*(fptr.(*[]byte)) = b
|
||||
case reflect.Uint32:
|
||||
u := new(uint32)
|
||||
if dv != nil {
|
||||
*u = dv.(uint32)
|
||||
}
|
||||
*(fptr.(**uint32)) = u
|
||||
case reflect.Uint64:
|
||||
u := new(uint64)
|
||||
if dv != nil {
|
||||
*u = dv.(uint64)
|
||||
}
|
||||
*(fptr.(**uint64)) = u
|
||||
default:
|
||||
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
|
||||
}
|
||||
}
|
||||
|
||||
for _, ni := range dm.nested {
|
||||
f := v.Field(ni)
|
||||
// f is *T or []*T or map[T]*T
|
||||
switch f.Kind() {
|
||||
case reflect.Ptr:
|
||||
if f.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(f, recur, zeros)
|
||||
|
||||
case reflect.Slice:
|
||||
for i := 0; i < f.Len(); i++ {
|
||||
e := f.Index(i)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
for _, k := range f.MapKeys() {
|
||||
e := f.MapIndex(k)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
	// defaults maps a protocol buffer struct type to information about the
	// proto-declared default values of its fields (see defaultMessage below).
|
||||
defaultMu sync.RWMutex
|
||||
defaults = make(map[reflect.Type]defaultMessage)
|
||||
|
||||
int32PtrType = reflect.TypeOf((*int32)(nil))
|
||||
)
|
||||
|
||||
// defaultMessage represents information about the default values of a message.
|
||||
type defaultMessage struct {
|
||||
scalars []scalarField
|
||||
nested []int // struct field index of nested messages
|
||||
}
|
||||
|
||||
type scalarField struct {
|
||||
index int // struct field index
|
||||
kind reflect.Kind // element type (the T in *T or []T)
|
||||
value interface{} // the proto-declared default value, or nil
|
||||
}
|
||||
|
||||
// t is a struct type.
|
||||
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
|
||||
sprop := GetProperties(t)
|
||||
for _, prop := range sprop.Prop {
|
||||
fi, ok := sprop.decoderTags.get(prop.Tag)
|
||||
if !ok {
|
||||
// XXX_unrecognized
|
||||
continue
|
||||
}
|
||||
ft := t.Field(fi).Type
|
||||
|
||||
sf, nested, err := fieldDefault(ft, prop)
|
||||
switch {
|
||||
case err != nil:
|
||||
log.Print(err)
|
||||
case nested:
|
||||
dm.nested = append(dm.nested, fi)
|
||||
case sf != nil:
|
||||
sf.index = fi
|
||||
dm.scalars = append(dm.scalars, *sf)
|
||||
}
|
||||
}
|
||||
|
||||
return dm
|
||||
}
|
||||
|
||||
// fieldDefault returns the scalarField for field type ft.
// sf will be nil if the field cannot have a default.
// nestedMessage will be true if this is a nested message.
// Note that sf.index is not set on return.
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
|
||||
var canHaveDefault bool
|
||||
switch ft.Kind() {
|
||||
case reflect.Ptr:
|
||||
if ft.Elem().Kind() == reflect.Struct {
|
||||
nestedMessage = true
|
||||
} else {
|
||||
canHaveDefault = true // proto2 scalar field
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Ptr:
|
||||
nestedMessage = true // repeated message
|
||||
case reflect.Uint8:
|
||||
canHaveDefault = true // bytes field
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
if ft.Elem().Kind() == reflect.Ptr {
|
||||
nestedMessage = true // map with message values
|
||||
}
|
||||
}
|
||||
|
||||
if !canHaveDefault {
|
||||
if nestedMessage {
|
||||
return nil, true, nil
|
||||
}
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
// We now know that ft is a pointer or slice.
|
||||
sf = &scalarField{kind: ft.Elem().Kind()}
|
||||
|
||||
// scalar fields without defaults
|
||||
if !prop.HasDefault {
|
||||
return sf, false, nil
|
||||
}
|
||||
|
||||
// a scalar field: either *T or []byte
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Bool:
|
||||
x, err := strconv.ParseBool(prop.Default)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Float32:
|
||||
x, err := strconv.ParseFloat(prop.Default, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = float32(x)
|
||||
case reflect.Float64:
|
||||
x, err := strconv.ParseFloat(prop.Default, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Int32:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = int32(x)
|
||||
case reflect.Int64:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.String:
|
||||
sf.value = prop.Default
|
||||
case reflect.Uint8:
|
||||
// []byte (not *uint8)
|
||||
sf.value = []byte(prop.Default)
|
||||
case reflect.Uint32:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = uint32(x)
|
||||
case reflect.Uint64:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
default:
|
||||
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
|
||||
}
|
||||
|
||||
return sf, false, nil
|
||||
}
|
||||
|
||||
// Map fields may have key types of non-float scalars, strings and enums.
|
||||
// The easiest way to sort them in some deterministic order is to use fmt.
|
||||
// If this turns out to be inefficient we can always consider other options,
|
||||
// such as doing a Schwartzian transform.
|
||||
|
||||
func mapKeys(vs []reflect.Value) sort.Interface {
|
||||
s := mapKeySorter{
|
||||
vs: vs,
|
||||
// default Less function: textual comparison
|
||||
less: func(a, b reflect.Value) bool {
|
||||
return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
|
||||
},
|
||||
}
|
||||
|
||||
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
|
||||
// numeric keys are sorted numerically.
|
||||
if len(vs) == 0 {
|
||||
return s
|
||||
}
|
||||
switch vs[0].Kind() {
|
||||
case reflect.Int32, reflect.Int64:
|
||||
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
type mapKeySorter struct {
|
||||
vs []reflect.Value
|
||||
less func(a, b reflect.Value) bool
|
||||
}
|
||||
|
||||
func (s mapKeySorter) Len() int { return len(s.vs) }
|
||||
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
|
||||
func (s mapKeySorter) Less(i, j int) bool {
|
||||
return s.less(s.vs[i], s.vs[j])
|
||||
}
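// A sketch of how the encoder uses this to emit map entries in a stable
// order (mv is a hypothetical reflect.Value of map kind):
//
//	keys := mv.MapKeys()
//	sort.Sort(mapKeys(keys))
//	for _, k := range keys {
//		// encode mv.MapIndex(k) ...
//	}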
|
||||
|
||||
// isProto3Zero reports whether v is a zero proto3 value.
|
||||
func isProto3Zero(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.String:
|
||||
return v.String() == ""
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
// to assert that the generated code is compatible with this version of the proto package.
const ProtoPackageIsVersion2 = true

// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that the generated code is compatible with this version of the proto package.
const ProtoPackageIsVersion1 = true
|
||||
vendor/github.com/golang/protobuf/proto/message_set.go (generated, vendored, new file, 311 lines)
@@ -0,0 +1,311 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Support for message sets.
|
||||
*/
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
|
||||
// A message type ID is required for storing a protocol buffer in a message set.
|
||||
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
|
||||
|
||||
// The first two types (_MessageSet_Item and messageSet)
|
||||
// model what the protocol compiler produces for the following protocol message:
|
||||
// message MessageSet {
|
||||
// repeated group Item = 1 {
|
||||
// required int32 type_id = 2;
|
||||
// required string message = 3;
|
||||
// };
|
||||
// }
|
||||
// That is the MessageSet wire format. We can't use a proto to generate these
|
||||
// because that would introduce a circular dependency between it and this package.
|
||||
|
||||
type _MessageSet_Item struct {
|
||||
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
|
||||
Message []byte `protobuf:"bytes,3,req,name=message"`
|
||||
}
|
||||
|
||||
type messageSet struct {
|
||||
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
|
||||
XXX_unrecognized []byte
|
||||
// TODO: caching?
|
||||
}
|
||||
|
||||
// Make sure messageSet is a Message.
|
||||
var _ Message = (*messageSet)(nil)
|
||||
|
||||
// messageTypeIder is an interface satisfied by a protocol buffer type
|
||||
// that may be stored in a MessageSet.
|
||||
type messageTypeIder interface {
|
||||
MessageTypeId() int32
|
||||
}
|
||||
|
||||
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
id := mti.MessageTypeId()
|
||||
for _, item := range ms.Item {
|
||||
if *item.TypeId == id {
|
||||
return item
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Has(pb Message) bool {
	return ms.find(pb) != nil
}
|
||||
|
||||
func (ms *messageSet) Unmarshal(pb Message) error {
|
||||
if item := ms.find(pb); item != nil {
|
||||
return Unmarshal(item.Message, pb)
|
||||
}
|
||||
if _, ok := pb.(messageTypeIder); !ok {
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
return nil // TODO: return error instead?
|
||||
}
|
||||
|
||||
func (ms *messageSet) Marshal(pb Message) error {
|
||||
msg, err := Marshal(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if item := ms.find(pb); item != nil {
|
||||
// reuse existing item
|
||||
item.Message = msg
|
||||
return nil
|
||||
}
|
||||
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
|
||||
mtid := mti.MessageTypeId()
|
||||
ms.Item = append(ms.Item, &_MessageSet_Item{
|
||||
TypeId: &mtid,
|
||||
Message: msg,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Reset() { *ms = messageSet{} }
|
||||
func (ms *messageSet) String() string { return CompactTextString(ms) }
|
||||
func (*messageSet) ProtoMessage() {}
|
||||
|
||||
// Support for the message_set_wire_format message option.
|
||||
|
||||
func skipVarint(buf []byte) []byte {
|
||||
i := 0
|
||||
for ; buf[i]&0x80 != 0; i++ {
|
||||
}
|
||||
return buf[i+1:]
|
||||
}
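// For example, skipVarint([]byte{0xac, 0x02, 0x08}) returns []byte{0x08}:
// 0xac has the continuation bit set and 0x02 does not, so the two-byte
// varint (value 300) is skipped. The input must begin with a well-formed
// varint, or the loop will run off the end of the slice.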
|
||||
|
||||
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
|
||||
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func MarshalMessageSet(exts interface{}) ([]byte, error) {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
if err := encodeExtensions(exts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m, _ = exts.extensionsRead()
|
||||
case map[int32]Extension:
|
||||
if err := encodeExtensionsMap(exts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m = exts
|
||||
default:
|
||||
return nil, errors.New("proto: not an extension map")
|
||||
}
|
||||
|
||||
// Sort extension IDs to provide a deterministic encoding.
|
||||
// See also enc_map in encode.go.
|
||||
ids := make([]int, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, int(id))
|
||||
}
|
||||
sort.Ints(ids)
|
||||
|
||||
ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
|
||||
for _, id := range ids {
|
||||
e := m[int32(id)]
|
||||
// Remove the wire type and field number varint, as well as the length varint.
|
||||
msg := skipVarint(skipVarint(e.enc))
|
||||
|
||||
ms.Item = append(ms.Item, &_MessageSet_Item{
|
||||
TypeId: Int32(int32(id)),
|
||||
Message: msg,
|
||||
})
|
||||
}
|
||||
return Marshal(ms)
|
||||
}
|
||||
|
||||
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
||||
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSet(buf []byte, exts interface{}) error {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
m = exts.extensionsWrite()
|
||||
case map[int32]Extension:
|
||||
m = exts
|
||||
default:
|
||||
return errors.New("proto: not an extension map")
|
||||
}
|
||||
|
||||
ms := new(messageSet)
|
||||
if err := Unmarshal(buf, ms); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, item := range ms.Item {
|
||||
id := *item.TypeId
|
||||
msg := item.Message
|
||||
|
||||
// Restore wire type and field number varint, plus length varint.
|
||||
// Be careful to preserve duplicate items.
|
||||
b := EncodeVarint(uint64(id)<<3 | WireBytes)
|
||||
if ext, ok := m[id]; ok {
|
||||
// Existing data; rip off the tag and length varint
|
||||
// so we join the new data correctly.
|
||||
// We can assume that ext.enc is set because we are unmarshaling.
|
||||
o := ext.enc[len(b):] // skip wire type and field number
|
||||
_, n := DecodeVarint(o) // calculate length of length varint
|
||||
o = o[n:] // skip length varint
|
||||
msg = append(o, msg...) // join old data and new data
|
||||
}
|
||||
b = append(b, EncodeVarint(uint64(len(msg)))...)
|
||||
b = append(b, msg...)
|
||||
|
||||
m[id] = Extension{enc: b}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
|
||||
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
m, _ = exts.extensionsRead()
|
||||
case map[int32]Extension:
|
||||
m = exts
|
||||
default:
|
||||
return nil, errors.New("proto: not an extension map")
|
||||
}
|
||||
var b bytes.Buffer
|
||||
b.WriteByte('{')
|
||||
|
||||
// Process the map in key order for deterministic output.
|
||||
ids := make([]int32, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
|
||||
|
||||
for i, id := range ids {
|
||||
ext := m[id]
|
||||
if i > 0 {
|
||||
b.WriteByte(',')
|
||||
}
|
||||
|
||||
msd, ok := messageSetMap[id]
|
||||
if !ok {
|
||||
// Unknown type; we can't render it, so skip it.
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(&b, `"[%s]":`, msd.name)
|
||||
|
||||
x := ext.value
|
||||
if x == nil {
|
||||
x = reflect.New(msd.t.Elem()).Interface()
|
||||
if err := Unmarshal(ext.enc, x.(Message)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
d, err := json.Marshal(x)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.Write(d)
|
||||
}
|
||||
b.WriteByte('}')
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
|
||||
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
|
||||
// Common-case fast path.
|
||||
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// This is fairly tricky, and it's not clear that it is needed.
|
||||
return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
|
||||
}
|
||||
|
||||
// A global registry of types that can be used in a MessageSet.
|
||||
|
||||
var messageSetMap = make(map[int32]messageSetDesc)
|
||||
|
||||
type messageSetDesc struct {
|
||||
t reflect.Type // pointer to struct
|
||||
name string
|
||||
}
|
||||
|
||||
// RegisterMessageSetType is called from the generated code.
|
||||
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
|
||||
messageSetMap[fieldNum] = messageSetDesc{
|
||||
t: reflect.TypeOf(m),
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
vendor/github.com/golang/protobuf/proto/pointer_reflect.go (generated, vendored, new file, 484 lines)
@@ -0,0 +1,484 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build appengine js
|
||||
|
||||
// This file contains an implementation of proto field accesses using package reflect.
|
||||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
||||
// be used on App Engine.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// A structPointer is a pointer to a struct.
|
||||
type structPointer struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// toStructPointer returns a structPointer equivalent to the given reflect value.
|
||||
// The reflect value must itself be a pointer to a struct.
|
||||
func toStructPointer(v reflect.Value) structPointer {
|
||||
return structPointer{v}
|
||||
}
|
||||
|
||||
// IsNil reports whether p is nil.
|
||||
func structPointer_IsNil(p structPointer) bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
// Interface returns the struct pointer as an interface value.
|
||||
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
|
||||
return p.v.Interface()
|
||||
}
|
||||
|
||||
// A field identifies a field in a struct, accessible from a structPointer.
|
||||
// In this implementation, a field is identified by the sequence of field indices
|
||||
// passed to reflect's FieldByIndex.
|
||||
type field []int
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return f.Index
|
||||
}
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
var invalidField = field(nil)
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool { return f != nil }
|
||||
|
||||
// field returns the given field in the struct as a reflect value.
|
||||
func structPointer_field(p structPointer, f field) reflect.Value {
|
||||
// Special case: an extension map entry with a value of type T
|
||||
// passes a *T to the struct-handling code with a zero field,
|
||||
// expecting that it will be treated as equivalent to *struct{ X T },
|
||||
// which has the same memory layout. We have to handle that case
|
||||
// specially, because reflect will panic if we call FieldByIndex on a
|
||||
// non-struct.
|
||||
if f == nil {
|
||||
return p.v.Elem()
|
||||
}
|
||||
|
||||
return p.v.Elem().FieldByIndex(f)
|
||||
}
|
||||
|
||||
// ifield returns the given field in the struct as an interface value.
|
||||
func structPointer_ifield(p structPointer, f field) interface{} {
|
||||
return structPointer_field(p, f).Addr().Interface()
|
||||
}
|
||||
|
||||
// Bytes returns the address of a []byte field in the struct.
|
||||
func structPointer_Bytes(p structPointer, f field) *[]byte {
|
||||
return structPointer_ifield(p, f).(*[]byte)
|
||||
}
|
||||
|
||||
// BytesSlice returns the address of a [][]byte field in the struct.
|
||||
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
|
||||
return structPointer_ifield(p, f).(*[][]byte)
|
||||
}
|
||||
|
||||
// Bool returns the address of a *bool field in the struct.
|
||||
func structPointer_Bool(p structPointer, f field) **bool {
|
||||
return structPointer_ifield(p, f).(**bool)
|
||||
}
|
||||
|
||||
// BoolVal returns the address of a bool field in the struct.
|
||||
func structPointer_BoolVal(p structPointer, f field) *bool {
|
||||
return structPointer_ifield(p, f).(*bool)
|
||||
}
|
||||
|
||||
// BoolSlice returns the address of a []bool field in the struct.
|
||||
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
|
||||
return structPointer_ifield(p, f).(*[]bool)
|
||||
}
|
||||
|
||||
// String returns the address of a *string field in the struct.
|
||||
func structPointer_String(p structPointer, f field) **string {
|
||||
return structPointer_ifield(p, f).(**string)
|
||||
}
|
||||
|
||||
// StringVal returns the address of a string field in the struct.
|
||||
func structPointer_StringVal(p structPointer, f field) *string {
|
||||
return structPointer_ifield(p, f).(*string)
|
||||
}
|
||||
|
||||
// StringSlice returns the address of a []string field in the struct.
|
||||
func structPointer_StringSlice(p structPointer, f field) *[]string {
|
||||
return structPointer_ifield(p, f).(*[]string)
|
||||
}
|
||||
|
||||
// Extensions returns the address of an extension map field in the struct.
|
||||
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
|
||||
return structPointer_ifield(p, f).(*XXX_InternalExtensions)
|
||||
}
|
||||
|
||||
// ExtMap returns the address of an extension map field in the struct.
|
||||
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
|
||||
return structPointer_ifield(p, f).(*map[int32]Extension)
|
||||
}
|
||||
|
||||
// NewAt returns the reflect.Value for a pointer to a field in the struct.
|
||||
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
|
||||
return structPointer_field(p, f).Addr()
|
||||
}
|
||||
|
||||
// SetStructPointer writes a *struct field in the struct.
|
||||
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
|
||||
structPointer_field(p, f).Set(q.v)
|
||||
}
|
||||
|
||||
// GetStructPointer reads a *struct field in the struct.
|
||||
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
|
||||
return structPointer{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// StructPointerSlice returns the address of a []*struct field in the struct.
|
||||
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
|
||||
return structPointerSlice{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// A structPointerSlice represents the address of a slice of pointers to structs
|
||||
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
|
||||
type structPointerSlice struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func (p structPointerSlice) Len() int { return p.v.Len() }
|
||||
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
|
||||
func (p structPointerSlice) Append(q structPointer) {
|
||||
p.v.Set(reflect.Append(p.v, q.v))
|
||||
}
|
||||
|
||||
var (
|
||||
int32Type = reflect.TypeOf(int32(0))
|
||||
uint32Type = reflect.TypeOf(uint32(0))
|
||||
float32Type = reflect.TypeOf(float32(0))
|
||||
int64Type = reflect.TypeOf(int64(0))
|
||||
uint64Type = reflect.TypeOf(uint64(0))
|
||||
float64Type = reflect.TypeOf(float64(0))
|
||||
)
|
||||
|
||||
// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
|
||||
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
|
||||
type word32 struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// IsNil reports whether p is nil.
|
||||
func word32_IsNil(p word32) bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
// Set sets p to point at a newly allocated word with bits set to x.
|
||||
func word32_Set(p word32, o *Buffer, x uint32) {
|
||||
t := p.v.Type().Elem()
|
||||
switch t {
|
||||
case int32Type:
|
||||
if len(o.int32s) == 0 {
|
||||
o.int32s = make([]int32, uint32PoolSize)
|
||||
}
|
||||
o.int32s[0] = int32(x)
|
||||
p.v.Set(reflect.ValueOf(&o.int32s[0]))
|
||||
o.int32s = o.int32s[1:]
|
||||
return
|
||||
case uint32Type:
|
||||
if len(o.uint32s) == 0 {
|
||||
o.uint32s = make([]uint32, uint32PoolSize)
|
||||
}
|
||||
o.uint32s[0] = x
|
||||
p.v.Set(reflect.ValueOf(&o.uint32s[0]))
|
||||
o.uint32s = o.uint32s[1:]
|
||||
return
|
||||
case float32Type:
|
||||
if len(o.float32s) == 0 {
|
||||
o.float32s = make([]float32, uint32PoolSize)
|
||||
}
|
||||
o.float32s[0] = math.Float32frombits(x)
|
||||
p.v.Set(reflect.ValueOf(&o.float32s[0]))
|
||||
o.float32s = o.float32s[1:]
|
||||
return
|
||||
}
|
||||
|
||||
// must be enum
|
||||
p.v.Set(reflect.New(t))
|
||||
p.v.Elem().SetInt(int64(int32(x)))
|
||||
}
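// The pools above (o.int32s, o.uint32s, o.float32s) hand out one element at
// a time: allocating uint32PoolSize words with a single make and re-slicing
// trades a little memory for far fewer small allocations while decoding.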
|
||||
|
||||
// Get gets the bits pointed at by p, as a uint32.
|
||||
func word32_Get(p word32) uint32 {
|
||||
elem := p.v.Elem()
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
return uint32(elem.Int())
|
||||
case reflect.Uint32:
|
||||
return uint32(elem.Uint())
|
||||
case reflect.Float32:
|
||||
return math.Float32bits(float32(elem.Float()))
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
|
||||
func structPointer_Word32(p structPointer, f field) word32 {
|
||||
return word32{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// A word32Val represents a field of type int32, uint32, float32, or enum.
|
||||
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
|
||||
type word32Val struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// Set sets *p to x.
|
||||
func word32Val_Set(p word32Val, x uint32) {
|
||||
switch p.v.Type() {
|
||||
case int32Type:
|
||||
p.v.SetInt(int64(x))
|
||||
return
|
||||
case uint32Type:
|
||||
p.v.SetUint(uint64(x))
|
||||
return
|
||||
case float32Type:
|
||||
p.v.SetFloat(float64(math.Float32frombits(x)))
|
||||
return
|
||||
}
|
||||
|
||||
// must be enum
|
||||
p.v.SetInt(int64(int32(x)))
|
||||
}
|
||||
|
||||
// Get gets the bits pointed at by p, as a uint32.
|
||||
func word32Val_Get(p word32Val) uint32 {
|
||||
elem := p.v
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
return uint32(elem.Int())
|
||||
case reflect.Uint32:
|
||||
return uint32(elem.Uint())
|
||||
case reflect.Float32:
|
||||
return math.Float32bits(float32(elem.Float()))
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
|
||||
func structPointer_Word32Val(p structPointer, f field) word32Val {
|
||||
return word32Val{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// A word32Slice is a slice of 32-bit values.
|
||||
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
|
||||
type word32Slice struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func (p word32Slice) Append(x uint32) {
|
||||
n, m := p.v.Len(), p.v.Cap()
|
||||
if n < m {
|
||||
p.v.SetLen(n + 1)
|
||||
} else {
|
||||
t := p.v.Type().Elem()
|
||||
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
|
||||
}
|
||||
elem := p.v.Index(n)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
elem.SetInt(int64(int32(x)))
|
||||
case reflect.Uint32:
|
||||
elem.SetUint(uint64(x))
|
||||
case reflect.Float32:
|
||||
elem.SetFloat(float64(math.Float32frombits(x)))
|
||||
}
|
||||
}
|
||||
|
||||
func (p word32Slice) Len() int {
|
||||
return p.v.Len()
|
||||
}
|
||||
|
||||
func (p word32Slice) Index(i int) uint32 {
|
||||
elem := p.v.Index(i)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
return uint32(elem.Int())
|
||||
case reflect.Uint32:
|
||||
return uint32(elem.Uint())
|
||||
case reflect.Float32:
|
||||
return math.Float32bits(float32(elem.Float()))
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
|
||||
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
|
||||
return word32Slice{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// word64 is like word32 but for 64-bit values.
|
||||
type word64 struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func word64_Set(p word64, o *Buffer, x uint64) {
|
||||
t := p.v.Type().Elem()
|
||||
switch t {
|
||||
case int64Type:
|
||||
if len(o.int64s) == 0 {
|
||||
o.int64s = make([]int64, uint64PoolSize)
|
||||
}
|
||||
o.int64s[0] = int64(x)
|
||||
p.v.Set(reflect.ValueOf(&o.int64s[0]))
|
||||
o.int64s = o.int64s[1:]
|
||||
return
|
||||
case uint64Type:
|
||||
if len(o.uint64s) == 0 {
|
||||
o.uint64s = make([]uint64, uint64PoolSize)
|
||||
}
|
||||
o.uint64s[0] = x
|
||||
p.v.Set(reflect.ValueOf(&o.uint64s[0]))
|
||||
o.uint64s = o.uint64s[1:]
|
||||
return
|
||||
case float64Type:
|
||||
if len(o.float64s) == 0 {
|
||||
o.float64s = make([]float64, uint64PoolSize)
|
||||
}
|
||||
o.float64s[0] = math.Float64frombits(x)
|
||||
p.v.Set(reflect.ValueOf(&o.float64s[0]))
|
||||
o.float64s = o.float64s[1:]
|
||||
return
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func word64_IsNil(p word64) bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
func word64_Get(p word64) uint64 {
|
||||
elem := p.v.Elem()
|
||||
switch elem.Kind() {
|
||||
case reflect.Int64:
|
||||
return uint64(elem.Int())
|
||||
case reflect.Uint64:
|
||||
return elem.Uint()
|
||||
case reflect.Float64:
|
||||
return math.Float64bits(elem.Float())
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func structPointer_Word64(p structPointer, f field) word64 {
|
||||
return word64{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// word64Val is like word32Val but for 64-bit values.
|
||||
type word64Val struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
|
||||
switch p.v.Type() {
|
||||
case int64Type:
|
||||
p.v.SetInt(int64(x))
|
||||
return
|
||||
case uint64Type:
|
||||
p.v.SetUint(x)
|
||||
return
|
||||
case float64Type:
|
||||
p.v.SetFloat(math.Float64frombits(x))
|
||||
return
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func word64Val_Get(p word64Val) uint64 {
|
||||
elem := p.v
|
||||
switch elem.Kind() {
|
||||
case reflect.Int64:
|
||||
return uint64(elem.Int())
|
||||
case reflect.Uint64:
|
||||
return elem.Uint()
|
||||
case reflect.Float64:
|
||||
return math.Float64bits(elem.Float())
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func structPointer_Word64Val(p structPointer, f field) word64Val {
|
||||
return word64Val{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
type word64Slice struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func (p word64Slice) Append(x uint64) {
|
||||
n, m := p.v.Len(), p.v.Cap()
|
||||
if n < m {
|
||||
p.v.SetLen(n + 1)
|
||||
} else {
|
||||
t := p.v.Type().Elem()
|
||||
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
|
||||
}
|
||||
elem := p.v.Index(n)
|
||||
	switch elem.Kind() {
	case reflect.Int64:
		elem.SetInt(int64(x))
	case reflect.Uint64:
		elem.SetUint(x)
	case reflect.Float64:
		elem.SetFloat(math.Float64frombits(x))
	}
|
||||
}
|
||||
|
||||
func (p word64Slice) Len() int {
|
||||
return p.v.Len()
|
||||
}
|
||||
|
||||
func (p word64Slice) Index(i int) uint64 {
|
||||
elem := p.v.Index(i)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int64:
|
||||
return uint64(elem.Int())
|
||||
	case reflect.Uint64:
		return elem.Uint()
	case reflect.Float64:
		return math.Float64bits(elem.Float())
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
|
||||
return word64Slice{structPointer_field(p, f)}
|
||||
}
|
||||
vendor/github.com/golang/protobuf/proto/pointer_unsafe.go (generated, vendored, new file, 270 lines)
@@ -0,0 +1,270 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build !appengine,!js
|
||||
|
||||
// This file contains the implementation of the proto field accesses using package unsafe.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// NOTE: These type_Foo functions would more idiomatically be methods,
|
||||
// but Go does not allow methods on pointer types, and we must preserve
|
||||
// some pointer type for the garbage collector. We use these
|
||||
// funcs with clunky names as our poor approximation to methods.
|
||||
//
|
||||
// An alternative would be
|
||||
// type structPointer struct { p unsafe.Pointer }
|
||||
// but that does not registerize as well.
|
||||
|
||||
// A structPointer is a pointer to a struct.
|
||||
type structPointer unsafe.Pointer
|
||||
|
||||
// toStructPointer returns a structPointer equivalent to the given reflect value.
|
||||
func toStructPointer(v reflect.Value) structPointer {
|
||||
return structPointer(unsafe.Pointer(v.Pointer()))
|
||||
}
|
||||
|
||||
// IsNil reports whether p is nil.
|
||||
func structPointer_IsNil(p structPointer) bool {
|
||||
return p == nil
|
||||
}
|
||||
|
||||
// Interface returns the struct pointer, assumed to have element type t,
|
||||
// as an interface value.
|
||||
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
|
||||
return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
|
||||
}
|
||||
|
||||
// A field identifies a field in a struct, accessible from a structPointer.
|
||||
// In this implementation, a field is identified by its byte offset from the start of the struct.
|
||||
type field uintptr
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return field(f.Offset)
|
||||
}
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
const invalidField = ^field(0)
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool {
|
||||
return f != ^field(0)
|
||||
}
|
||||
|
||||
// Bytes returns the address of a []byte field in the struct.
|
||||
func structPointer_Bytes(p structPointer, f field) *[]byte {
|
||||
return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// BytesSlice returns the address of a [][]byte field in the struct.
|
||||
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
|
||||
return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// Bool returns the address of a *bool field in the struct.
|
||||
func structPointer_Bool(p structPointer, f field) **bool {
|
||||
return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// BoolVal returns the address of a bool field in the struct.
|
||||
func structPointer_BoolVal(p structPointer, f field) *bool {
|
||||
return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// BoolSlice returns the address of a []bool field in the struct.
|
||||
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
|
||||
return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// String returns the address of a *string field in the struct.
|
||||
func structPointer_String(p structPointer, f field) **string {
|
||||
return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// StringVal returns the address of a string field in the struct.
|
||||
func structPointer_StringVal(p structPointer, f field) *string {
|
||||
return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// StringSlice returns the address of a []string field in the struct.
|
||||
func structPointer_StringSlice(p structPointer, f field) *[]string {
|
||||
return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// Extensions returns the address of the XXX_InternalExtensions field in the struct.
|
||||
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
|
||||
return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
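// ExtMap returns the address of an extension map field in the struct.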
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
|
||||
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// NewAt returns the reflect.Value for a pointer to a field in the struct.
|
||||
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
|
||||
return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
|
||||
}

// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
	*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
}

// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
	return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// StructPointerSlice returns the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
	return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
type structPointerSlice []structPointer

func (v *structPointerSlice) Len() int                  { return len(*v) }
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
func (v *structPointerSlice) Append(p structPointer)    { *v = append(*v, p) }

// A word32 is the address of a "pointer to 32-bit value" field.
type word32 **uint32

// IsNil reports whether *p is nil.
func word32_IsNil(p word32) bool {
	return *p == nil
}

// Set sets *p to point at a newly allocated word set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
	if len(o.uint32s) == 0 {
		o.uint32s = make([]uint32, uint32PoolSize)
	}
	o.uint32s[0] = x
	*p = &o.uint32s[0]
	o.uint32s = o.uint32s[1:]
}
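
// A note on the allocation strategy above: rather than calling new(uint32)
// once per field, Set hands out single-element pointers carved from a slab of
// uint32PoolSize values owned by the Buffer, amortizing one allocation across
// many decoded fields.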

// Get gets the value pointed at by *p.
func word32_Get(p word32) uint32 {
	return **p
}

// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
	return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// A word32Val is the address of a 32-bit value field.
type word32Val *uint32

// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
	*p = x
}

// Get gets the value pointed at by p.
func word32Val_Get(p word32Val) uint32 {
	return *p
}

// Word32Val returns the address of an int32, uint32, float32, or enum value field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
	return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
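
// Illustrative sketch: word32Val addresses a non-pointer 32-bit field, in
// contrast to word32's pointer fields. With a hypothetical struct:
//
//	type pb struct{ Count uint32 }
//	m := &pb{}
//	sf, _ := reflect.TypeOf(*m).FieldByName("Count")
//	w := structPointer_Word32Val(toStructPointer(reflect.ValueOf(m)), toField(&sf))
//	word32Val_Set(w, 42) // m.Count == 42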

// A word32Slice is a slice of 32-bit values.
type word32Slice []uint32

func (v *word32Slice) Append(x uint32)    { *v = append(*v, x) }
func (v *word32Slice) Len() int           { return len(*v) }
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }

// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
	return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// word64 is like word32 but for 64-bit values.
type word64 **uint64

// Set sets *p to point at a newly allocated word set to x.
func word64_Set(p word64, o *Buffer, x uint64) {
	if len(o.uint64s) == 0 {
		o.uint64s = make([]uint64, uint64PoolSize)
	}
	o.uint64s[0] = x
	*p = &o.uint64s[0]
	o.uint64s = o.uint64s[1:]
}

// IsNil reports whether *p is nil.
func word64_IsNil(p word64) bool {
	return *p == nil
}

// Get gets the value pointed at by *p.
func word64_Get(p word64) uint64 {
	return **p
}

// Word64 returns the address of a pointer-to-64-bit-value field in the struct.
func structPointer_Word64(p structPointer, f field) word64 {
	return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// word64Val is like word32Val but for 64-bit values.
type word64Val *uint64

// Set sets *p to x. The Buffer argument is unused here, presumably kept for
// signature parity with the reflection-based implementation.
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
	*p = x
}

// Get gets the value pointed at by p.
func word64Val_Get(p word64Val) uint64 {
	return *p
}

// Word64Val returns the address of a 64-bit value field in the struct.
func structPointer_Word64Val(p structPointer, f field) word64Val {
	return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// word64Slice is like word32Slice but for 64-bit values.
type word64Slice []uint64

func (v *word64Slice) Append(x uint64)    { *v = append(*v, x) }
func (v *word64Slice) Len() int           { return len(*v) }
func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }

func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
	return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
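
// Illustrative sketch: appending to a repeated 64-bit field through the slice
// wrapper above (hypothetical struct):
//
//	type pb struct{ IDs []uint64 }
//	m := &pb{}
//	sf, _ := reflect.TypeOf(*m).FieldByName("IDs")
//	s := structPointer_Word64Slice(toStructPointer(reflect.ValueOf(m)), toField(&sf))
//	s.Append(99) // m.IDs == []uint64{99}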